From dd506e23726c57930eadd232f5deebf1ccd1171a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 4 Nov 2024 15:37:29 +0100 Subject: [PATCH 01/42] Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 (#1877) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/hashicorp/terraform-json](https://github.com/hashicorp/terraform-json) from 0.22.1 to 0.23.0.
Release notes (sourced from github.com/hashicorp/terraform-json's releases):

v0.23.0

Full Changelog: https://github.com/hashicorp/terraform-json/compare/v0.22.1...v0.23.0
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 91a9c3038..df90c6057 100644 --- a/go.mod +++ b/go.mod @@ -14,7 +14,7 @@ require ( github.com/hashicorp/go-version v1.7.0 // MPL 2.0 github.com/hashicorp/hc-install v0.9.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 + github.com/hashicorp/terraform-json v0.23.0 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT @@ -56,7 +56,7 @@ require ( github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/stretchr/objx v0.5.2 // indirect - github.com/zclconf/go-cty v1.14.4 // indirect + github.com/zclconf/go-cty v1.15.0 // indirect go.opencensus.io v0.24.0 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect go.opentelemetry.io/otel v1.24.0 // indirect diff --git a/go.sum b/go.sum index c47ae7693..11a40d461 100644 --- a/go.sum +++ b/go.sum @@ -109,8 +109,8 @@ github.com/hashicorp/hc-install v0.9.0 h1:2dIk8LcvANwtv3QZLckxcjyF5w8KVtiMxu6G6e github.com/hashicorp/hc-install v0.9.0/go.mod h1:+6vOP+mf3tuGgMApVYtmsnDoKWMDcFXeTxCACYZ8SFg= github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= -github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= -github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= +github.com/hashicorp/terraform-json v0.23.0 h1:sniCkExU4iKtTADReHzACkk8fnpQXrdD2xoR+lppBkI= +github.com/hashicorp/terraform-json v0.23.0/go.mod h1:MHdXbBAbSg0GvzuWazEGKAn/cyNfIB7mN6y7KJN6y2c= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -160,8 +160,8 @@ github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsT github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= -github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= +github.com/zclconf/go-cty v1.15.0/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= From edff68c7637c3aae4c854b5cfd6858413beed90a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 5 Nov 2024 10:30:11 +0100 Subject: [PATCH 02/42] Fix bundle run when run interactively (#1880) ## Changes The commit where resource 
lookup was factored out into a separate package (#1858) didn't take into account the use of `args` further down in the code. This change fixes that oversight by returning the tail arguments when determining which resource to run. The later call no longer has to index the `args` slice. ## Tests Manually confirmed that the command works when being prompted for the resource to run. --- cmd/bundle/run.go | 20 +++++++++++++------- 1 file changed, 13 insertions(+), 7 deletions(-) diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index 96851d0c0..7a92766d9 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -35,17 +35,23 @@ func promptRunArgument(ctx context.Context, b *bundle.Bundle) (string, error) { return key, nil } -func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, error) { +// resolveRunArgument resolves the resource key to run. +// It returns the remaining arguments to pass to the runner, if applicable. +func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (string, []string, error) { // If no arguments are specified, prompt the user to select something to run. if len(args) == 0 && cmdio.IsPromptSupported(ctx) { - return promptRunArgument(ctx, b) + key, err := promptRunArgument(ctx, b) + if err != nil { + return "", nil, err + } + return key, args, nil } if len(args) < 1 { - return "", fmt.Errorf("expected a KEY of the resource to run") + return "", nil, fmt.Errorf("expected a KEY of the resource to run") } - return args[0], nil + return args[0], args[1:], nil } func keyToRunner(b *bundle.Bundle, arg string) (run.Runner, error) { @@ -109,7 +115,7 @@ task or a Python wheel task, the second example applies. return err } - arg, err := resolveRunArgument(ctx, b, args) + key, args, err := resolveRunArgument(ctx, b, args) if err != nil { return err } @@ -124,13 +130,13 @@ task or a Python wheel task, the second example applies. return err } - runner, err := keyToRunner(b, arg) + runner, err := keyToRunner(b, key) if err != nil { return err } // Parse additional positional arguments. - err = runner.ParseArgs(args[1:], &runOptions) + err = runner.ParseArgs(args, &runOptions) if err != nil { return err } From 26afab2ccb5e5c5a7bc3c9f520c917ec19f46045 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 5 Nov 2024 10:53:53 +0100 Subject: [PATCH 03/42] Fix relative path resolution for dashboards on Windows (#1881) ## Changes The file presence check for dashboard files was missing a `filepath.ToSlash`. This means it didn't work on Windows unless the dashboard was located at a path without slashes (i.e. the bundle root). Closes #1875. ## Tests * Added a unit test to cover this case (failed before the fix). * Manually ran a dashboard deployment on Windows. 
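As an aside, a minimal self-contained sketch of the Windows failure mode (illustrative only; the file names are made up): `filepath.Join` produces backslash-separated paths on Windows, while `fs.FS` implementations such as the bundle's `SyncRoot` accept only slash-separated paths, so the lookup must be normalized with `filepath.ToSlash` first.

```go
package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// On Windows, filepath.Join yields `src\my_dashboard.lvdash.json`.
	// fs.FS implementations (such as the bundle's SyncRoot) accept only
	// slash-separated paths, so a Stat on the raw value would fail with
	// fs.ErrNotExist; normalizing with ToSlash makes it portable.
	rel := filepath.Join("src", "my_dashboard.lvdash.json")
	fmt.Println(filepath.ToSlash(rel)) // "src/my_dashboard.lvdash.json" on all platforms
}
```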
--- bundle/config/mutator/translate_paths.go | 2 +- .../translate_paths_dashboards_test.go | 54 +++++++++++++++++++ 2 files changed, 55 insertions(+), 1 deletion(-) create mode 100644 bundle/config/mutator/translate_paths_dashboards_test.go diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 82b0b3caa..321fa5b30 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -163,7 +163,7 @@ func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, r } func (t *translateContext) retainLocalAbsoluteFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - info, err := t.b.SyncRoot.Stat(localRelPath) + info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } diff --git a/bundle/config/mutator/translate_paths_dashboards_test.go b/bundle/config/mutator/translate_paths_dashboards_test.go new file mode 100644 index 000000000..c386f1bbe --- /dev/null +++ b/bundle/config/mutator/translate_paths_dashboards_test.go @@ -0,0 +1,54 @@ +package mutator_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) { + dir := t.TempDir() + touchEmptyFile(t, filepath.Join(dir, "src", "my_dashboard.lvdash.json")) + + b := &bundle.Bundle{ + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), + Config: config.Root{ + Resources: config.Resources{ + Dashboards: map[string]*resources.Dashboard{ + "dashboard": { + CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + DisplayName: "My Dashboard", + }, + FilePath: "../src/my_dashboard.lvdash.json", + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.dashboards", []dyn.Location{{ + File: filepath.Join(dir, "resources/dashboard.yml"), + }}) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) + + // Assert that the file path for the dashboard has been converted to its local absolute path. + assert.Equal( + t, + filepath.Join(dir, "src", "my_dashboard.lvdash.json"), + b.Config.Resources.Dashboards["dashboard"].FilePath, + ) +} From b81008e2f64d3ee9a29338f4e42032cb56630e86 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 5 Nov 2024 20:59:27 +0530 Subject: [PATCH 04/42] Clean host URL in the `auth login` command (#1879) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes The host URL for Databricks workspaces includes the workspace ID by default as a query parameter. E.g.: https://e2-dogfood.staging.cloud.databricks.com/?o=1234 Thus a user can't simply copy-paste the URL into the `auth login` command today. 
They'll see a runtime error: ``` ➜ cli git:(main) ✗ databricks auth login --host https://e2-dogfood.staging.cloud.databricks.com/\?o\=xxx --profile new-dg Error: oidc: fetch .well-known: failed to unmarshal response body: invalid character '<' looking for beginning of value. This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. Request log: GET /login.html ... ``` ## Tests Unit tests and manually. Now auth login works even when the workspace_id is included in the URL. --- libs/auth/oauth.go | 24 ++++++++++++++++++++++ libs/auth/oauth_test.go | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+) diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 7c1cb9576..026c45468 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -9,6 +9,7 @@ import ( "errors" "fmt" "net" + "net/url" "strings" "time" @@ -143,6 +144,26 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { return nil } +// This function cleans up the host URL by only retaining the scheme and the host. +// This function thus removes any path, query arguments, or fragments from the URL. +func (a *PersistentAuth) cleanHost() { + parsedHost, err := url.Parse(a.Host) + if err != nil { + return + } + // When either the host or the scheme is empty, we don't want to clean it. This is because + // the Go url library parses a raw "abc" string as the path of a URL and cleaning + // it will thus return an empty string. + if parsedHost.Host == "" || parsedHost.Scheme == "" { + return + } + host := url.URL{ + Scheme: parsedHost.Scheme, + Host: parsedHost.Host, + } + a.Host = host.String() +} + func (a *PersistentAuth) init(ctx context.Context) error { if a.Host == "" && a.AccountID == "" { return ErrFetchCredentials @@ -156,6 +177,9 @@ func (a *PersistentAuth) init(ctx context.Context) error { if a.browser == nil { a.browser = browser.OpenURL } + + a.cleanHost() + // try acquire listener, which we also use as a machine-local // exclusive lock to prevent token cache corruption in the scope // of developer machine, where this command runs. 
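Aside: a standalone sketch of the same normalization logic as a free function (illustrative only; the actual change adds the `cleanHost` method on `PersistentAuth` shown above):

```go
package main

import (
	"fmt"
	"net/url"
)

// cleanHost keeps only the scheme and host of a workspace URL, dropping
// any path, query arguments (such as ?o=<workspace-id>), and fragments.
func cleanHost(host string) string {
	u, err := url.Parse(host)
	// Scheme-less inputs like "abc" parse as a bare path; leave them untouched.
	if err != nil || u.Host == "" || u.Scheme == "" {
		return host
	}
	return (&url.URL{Scheme: u.Scheme, Host: u.Host}).String()
}

func main() {
	fmt.Println(cleanHost("https://e2-dogfood.staging.cloud.databricks.com/?o=1234"))
	// Output: https://e2-dogfood.staging.cloud.databricks.com
}
```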
diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index ea6a8061e..fdf0d04bf 100644 --- a/libs/auth/oauth_test.go +++ b/libs/auth/oauth_test.go @@ -228,3 +228,37 @@ func TestChallengeFailed(t *testing.T) { assert.EqualError(t, err, "authorize: access_denied: Policy evaluation failed for this request") }) } + +func TestPersistentAuthCleanHost(t *testing.T) { + for _, tcases := range []struct { + in string + out string + }{ + {"https://example.com", "https://example.com"}, + {"https://example.com/", "https://example.com"}, + {"https://example.com/path", "https://example.com"}, + {"https://example.com/path/subpath", "https://example.com"}, + {"https://example.com/path?query=1", "https://example.com"}, + {"https://example.com/path?query=1&other=2", "https://example.com"}, + {"https://example.com/path#fragment", "https://example.com"}, + {"https://example.com/path?query=1#fragment", "https://example.com"}, + {"https://example.com/path?query=1&other=2#fragment", "https://example.com"}, + {"https://example.com/path/subpath?query=1", "https://example.com"}, + {"https://example.com/path/subpath?query=1&other=2", "https://example.com"}, + {"https://example.com/path/subpath#fragment", "https://example.com"}, + {"https://example.com/path/subpath?query=1#fragment", "https://example.com"}, + {"https://example.com/path/subpath?query=1&other=2#fragment", "https://example.com"}, + {"https://example.com/path?query=1%20value&other=2%20value", "https://example.com"}, + {"http://example.com/path/subpath?query=1%20value&other=2%20value", "http://example.com"}, + + // URLs without scheme should be left as is + {"abc", "abc"}, + {"abc.com/def", "abc.com/def"}, + } { + p := &PersistentAuth{ + Host: tcases.in, + } + p.cleanHost() + assert.Equal(t, tcases.out, p.Host) + } +} From b6a376bf8a917fa92c018870009b0296b026ca70 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 6 Nov 2024 15:03:54 +0100 Subject: [PATCH 05/42] [Release] Release v0.233.0 (#1886) CLI: * Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)). Bundles: * Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)). * Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)). Internal: * Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)). * Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)). * Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)). * [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)). * Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)). * Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)). Dependency updates: * Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)). --- CHANGELOG.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 639270e32..9b08d7514 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,26 @@ # Version changelog +## [Release] Release v0.233.0 + +CLI: + * Clean host URL in the `auth login` command ([#1879](https://github.com/databricks/cli/pull/1879)). + +Bundles: + * Fix bundle run when run interactively ([#1880](https://github.com/databricks/cli/pull/1880)). 
+ * Fix relative path resolution for dashboards on Windows ([#1881](https://github.com/databricks/cli/pull/1881)). + +Internal: + * Address goreleaser deprecation warning ([#1872](https://github.com/databricks/cli/pull/1872)). + * Update actions/github-script to v7 ([#1873](https://github.com/databricks/cli/pull/1873)). + * Use Go 1.23 ([#1871](https://github.com/databricks/cli/pull/1871)). + * [Internal] Always write message for manual integration test trigger ([#1874](https://github.com/databricks/cli/pull/1874)). + * Add `cmd-exec-id` to user agent ([#1808](https://github.com/databricks/cli/pull/1808)). + * Added E2E test to run Python wheels on interactive cluster created in bundle ([#1864](https://github.com/databricks/cli/pull/1864)). + + +Dependency updates: + * Bump github.com/hashicorp/terraform-json from 0.22.1 to 0.23.0 ([#1877](https://github.com/databricks/cli/pull/1877)). + ## [Release] Release v0.232.1 This patch release fixes the following error observed when deploying to /Shared root folder From 162aa212bc271c502adcbf9d6f80285838666a5d Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 7 Nov 2024 10:31:49 +0100 Subject: [PATCH 06/42] Do not execute build on bundle destroy (#1882) ## Changes There's no value in building artifacts on destroy because they are just removed from the workspace as part of destroy. --- cmd/bundle/destroy.go | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index cd7e63062..711abbcd7 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -6,6 +6,7 @@ import ( "os" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" @@ -62,7 +63,12 @@ func newDestroyCommand() *cobra.Command { diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), - phases.Build(), + // We need to resolve the artifact variables (as we do in the build phase) + // because some of the to-be-destroyed resources might use these variables. + // Not resolving them might lead to a terraform "Reference to undeclared resource" error + mutator.ResolveVariableReferences( + "artifacts", + ), phases.Destroy(), )) if err := diags.Error(); err != nil { From 6a28ae0beaea377b0efa29c2b95b64bb54332bdc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:11:43 +0530 Subject: [PATCH 07/42] Bump golang.org/x/sync from 0.8.0 to 0.9.0 (#1892) Bumps [golang.org/x/sync](https://github.com/golang/sync) from 0.8.0 to 0.9.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index df90c6057..fed8e5d2e 100644 --- a/go.mod +++ b/go.mod @@ -26,7 +26,7 @@ require ( golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.21.0 golang.org/x/oauth2 v0.23.0 - golang.org/x/sync v0.8.0 + golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 golang.org/x/text v0.19.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index 11a40d461..666e9fef6 100644 --- a/go.sum +++ b/go.sum @@ -200,8 +200,8 @@ golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbht golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.8.0 h1:3NFvSEYkUoMifnESzZl15y791HH1qU2xm6eCJU5ZPXQ= -golang.org/x/sync v0.8.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ= +golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From bc96872b18c39796e7dbcbadacec4889d338596a Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:12:07 +0530 Subject: [PATCH 08/42] Bump golang.org/x/text from 0.19.0 to 0.20.0 (#1893) Bumps [golang.org/x/text](https://github.com/golang/text) from 0.19.0 to 0.20.0.
Commits
  • efd25da go.mod: update golang.org/x dependencies
  • 8a0e65e README: don't recommend go get
  • fefda1a internal/texttest: remove Run and Bench helpers
  • a457f47 all: normalize subtest names to NFC
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fed8e5d2e..1edcc5e7a 100644 --- a/go.mod +++ b/go.mod @@ -28,7 +28,7 @@ require ( golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 - golang.org/x/text v0.19.0 + golang.org/x/text v0.20.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) diff --git a/go.sum b/go.sum index 666e9fef6..2e3d2f39d 100644 --- a/go.sum +++ b/go.sum @@ -218,8 +218,8 @@ golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.19.0 h1:kTxAhCbGbxhK0IwgSKiMO5awPoDQ0RpfiVYBfK860YM= -golang.org/x/text v0.19.0/go.mod h1:BuEKDfySbSR4drPmRPG/7iBdf8hvFMuRexcpahXilzY= +golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= +golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= From 1e1aaddfa45de1c2673bbe1eb20b887f3dd79c6b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:12:54 +0530 Subject: [PATCH 09/42] Bump golang.org/x/mod from 0.21.0 to 0.22.0 (#1895) Bumps [golang.org/x/mod](https://github.com/golang/mod) from 0.21.0 to 0.22.0.
Commits
  • dec0365 sumdb: make data tiles by Server compatible with sum.golang.org
  • c8a7319 x/mod: fix handling of vendored packages with '/vendor' in non-top-level paths
  • 9cd0e4c x/mod: remove vendor/modules.txt from module download
  • See full diff in compare view

Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 1edcc5e7a..fa9ae1bd3 100644 --- a/go.mod +++ b/go.mod @@ -24,7 +24,7 @@ require ( github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.21.0 + golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.23.0 golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 diff --git a/go.sum b/go.sum index 2e3d2f39d..24b9f0ef1 100644 --- a/go.sum +++ b/go.sum @@ -184,8 +184,8 @@ golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0= -golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= +golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From f80ebe15f8b66977b184481bffcaefe47eb41e16 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 11 Nov 2024 17:28:19 +0530 Subject: [PATCH 10/42] Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 (#1894) Bumps [golang.org/x/oauth2](https://github.com/golang/oauth2) from 0.23.0 to 0.24.0.
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index fa9ae1bd3..e33214ebb 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.22.0 - golang.org/x/oauth2 v0.23.0 + golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.9.0 golang.org/x/term v0.25.0 golang.org/x/text v0.20.0 diff --git a/go.sum b/go.sum index 24b9f0ef1..419fa5681 100644 --- a/go.sum +++ b/go.sum @@ -195,8 +195,8 @@ golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= -golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE= +golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= From 25838ee0afa62c9e1bef66ef5462465b308b7a5b Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Wed, 13 Nov 2024 14:40:53 +0100 Subject: [PATCH 11/42] Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 (#1878) Known issues: - [ ] _(non-blocking with a command override)_ `apps.Update` requires 2 `name` params (one from path, one from request body) - [ ] _(non-blocking)_ `lakeview.Create` does not require positional argument `display_name` anymore because it's not marked as required in request body Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.49.0 to 0.51.0. 
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .codegen/service.go.tmpl | 51 +- .gitattributes | 4 +- bundle/config/mutator/apply_presets.go | 2 +- .../configure_dashboard_defaults_test.go | 4 +- bundle/config/mutator/initialize_urls_test.go | 2 +- .../mutator/process_target_mode_test.go | 2 +- .../translate_paths_dashboards_test.go | 2 +- bundle/config/resources/dashboard.go | 2 +- ...check_dashboards_modified_remotely_test.go | 2 +- bundle/deploy/terraform/convert_test.go | 6 +- .../terraform/tfdyn/convert_dashboard_test.go | 2 +- bundle/schema/embed_test.go | 11 +- bundle/schema/jsonschema.json | 342 +++++++---- .../service-principal-secrets.go | 2 + cmd/account/workspaces/workspaces.go | 2 + .../aibi-dashboard-embedding-access-policy.go | 162 ++++++ ...bi-dashboard-embedding-approved-domains.go | 162 ++++++ cmd/workspace/apps/apps.go | 51 +- cmd/workspace/apps/overrides.go | 59 ++ cmd/workspace/clean-rooms/clean-rooms.go | 385 ------------- .../cluster-policies/cluster-policies.go | 5 +- cmd/workspace/clusters/clusters.go | 7 +- cmd/workspace/cmd.go | 4 +- cmd/workspace/credentials/credentials.go | 545 ++++++++++++++++++ cmd/workspace/experiments/experiments.go | 5 +- .../external-locations/external-locations.go | 2 +- cmd/workspace/genie/genie.go | 4 +- .../instance-pools/instance-pools.go | 5 +- cmd/workspace/jobs/jobs.go | 8 +- cmd/workspace/lakeview/lakeview.go | 92 +-- .../model-registry/model-registry.go | 3 +- cmd/workspace/online-tables/online-tables.go | 31 +- cmd/workspace/permissions/permissions.go | 5 +- cmd/workspace/pipelines/pipelines.go | 6 +- cmd/workspace/repos/repos.go | 5 +- .../serving-endpoints/serving-endpoints.go | 3 +- cmd/workspace/settings/settings.go | 4 + cmd/workspace/shares/shares.go | 1 - .../storage-credentials.go | 2 +- cmd/workspace/tables/tables.go | 1 + .../token-management/token-management.go | 5 +- cmd/workspace/users/users.go | 5 +- cmd/workspace/warehouses/warehouses.go | 5 +- cmd/workspace/workspace/workspace.go | 4 +- go.mod | 2 +- go.sum | 4 +- internal/bundle/dashboards_test.go | 6 +- internal/dashboard_assumptions_test.go | 18 +- 49 files changed, 1390 insertions(+), 654 deletions(-) create mode 100755 cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go create mode 100755 cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go create mode 100644 cmd/workspace/apps/overrides.go delete mode 100755 cmd/workspace/clean-rooms/clean-rooms.go create mode 100755 cmd/workspace/credentials/credentials.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2d9cb6d86..5f4b50860 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cf9c61453990df0f9453670f2fe68e1b128647a2 \ No newline at end of file +d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index b489a0b0a..ef7977e1b 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -115,6 +115,9 @@ func new{{.PascalName}}() *cobra.Command { {{- if .Request}} var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}} + {{- if .RequestBodyField }} + {{.CamelName}}Req.{{.RequestBodyField.PascalName}} = &{{.Service.Package.Name}}.{{.RequestBodyField.Entity.PascalName}}{} + {{- end }} {{- if .CanUseJson}} var {{.CamelName}}Json 
flags.JsonFlag {{- end}} @@ -127,21 +130,27 @@ func new{{.PascalName}}() *cobra.Command { cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`) cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`) {{end -}} - {{if .Request}}// TODO: short flags + {{- $request := .Request -}} + {{- if .RequestBodyField -}} + {{- $request = .RequestBodyField.Entity -}} + {{- end -}} + {{if $request }}// TODO: short flags {{- if .CanUseJson}} cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`) {{- end}} {{$method := .}} {{ if not .IsJsonOnly }} - {{range .Request.Fields -}} + {{range $request.Fields -}} {{- if not .Required -}} {{if .Entity.IsObject }}// TODO: complex arg: {{.Name}} {{else if .Entity.IsAny }}// TODO: any: {{.Name}} {{else if .Entity.ArrayValue }}// TODO: array: {{.Name}} {{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}} {{else if .Entity.IsEmpty }}// TODO: output-only field - {{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. Supported values: {{template "printArray" .Entity.Enum}}`) - {{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`) + {{else if .Entity.IsComputed -}} + {{else if .IsOutputOnly -}} + {{else if .Entity.Enum }}cmd.Flags().Var(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", `{{.Summary | without "`" | trimSuffix "."}}. 
Supported values: {{template "printArray" .Entity.Enum}}`) + {{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{- template "request-body-obj" (dict "Method" $method "Field" .)}}, "{{.KebabName}}", {{- template "request-body-obj" (dict "Method" $method "Field" .)}}, `{{.Summary | without "`"}}`) {{end}} {{- end -}} {{- end}} @@ -161,14 +170,14 @@ func new{{.PascalName}}() *cobra.Command { {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} {{- $hasPosArgs := .HasRequiredPositionalArguments -}} - {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}} + {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len $request.RequiredFields)) -}} {{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}} {{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}} {{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}} {{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}} - {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt .Request) (eq 1 (len .Request.RequiredRequestBodyFields)) -}} - {{- $onlyPathArgsRequiredAsPositionalArguments := and .Request (eq (len .RequiredPositionalArguments) (len .Request.RequiredPathFields)) -}} - {{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson .Request.HasRequiredRequestBodyFields) -}} + {{- $hasSingleRequiredRequestBodyFieldWithPrompt := and (and $hasIdPrompt $request) (eq 1 (len $request.RequiredRequestBodyFields)) -}} + {{- $onlyPathArgsRequiredAsPositionalArguments := and $request (eq (len .RequiredPositionalArguments) (len $request.RequiredPathFields)) -}} + {{- $hasDifferentArgsWithJsonFlag := and (not $onlyPathArgsRequiredAsPositionalArguments) (and .CanUseJson (or $request.HasRequiredRequestBodyFields )) -}} {{- $hasCustomArgHandler := or $hasRequiredArgs $hasDifferentArgsWithJsonFlag -}} {{- $atleastOneArgumentWithDescription := false -}} @@ -206,12 +215,12 @@ func new{{.PascalName}}() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { {{- if $hasDifferentArgsWithJsonFlag }} if cmd.Flags().Changed("json") { - err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) + err := root.ExactArgs({{len $request.RequiredPathFields}})(cmd, args) if err != nil { - {{- if eq 0 (len .Request.RequiredPathFields) }} - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + {{- if eq 0 (len $request.RequiredPathFields) }} + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide{{- range $index, $field := $request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") {{- else }} - return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := .Request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. Provide{{- range $index, $field := .Request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only{{- range $index, $field := $request.RequiredPathFields}}{{if $index}},{{end}} {{$field.ConstantName}}{{end}} as positional arguments. 
Provide{{- range $index, $field := $request.RequiredRequestBodyFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") {{- end }} } return nil @@ -232,7 +241,7 @@ func new{{.PascalName}}() *cobra.Command { {{- if .Request }} {{ if .CanUseJson }} if cmd.Flags().Changed("json") { - diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req) + diags := {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req{{ if .RequestBodyField }}.{{.RequestBodyField.PascalName}}{{ end }}) if diags.HasError() { return diags.Error() } @@ -251,20 +260,20 @@ func new{{.PascalName}}() *cobra.Command { {{- if $hasIdPrompt}} if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down." + promptSpinner <- "No{{range $request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down." names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}") + id, err := cmdio.Select(ctx, names, "{{range $request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}") + return fmt.Errorf("expected to have {{range $request.RequiredFields}}{{.Summary | trimSuffix "." 
| lower}}{{end}}") } {{- end -}} @@ -388,13 +397,19 @@ func new{{.PascalName}}() *cobra.Command { if !cmd.Flags().Changed("json") { {{- end }} {{if not $field.Entity.IsString -}} - _, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}}) + _, err = fmt.Sscan(args[{{$arg}}], &{{- template "request-body-obj" (dict "Method" $method "Field" $field)}}) if err != nil { return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}]) }{{else -}} - {{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}] + {{- template "request-body-obj" (dict "Method" $method "Field" $field)}} = args[{{$arg}}] {{- end -}} {{- if $optionalIfJsonIsUsed }} } {{- end }} {{- end -}} + +{{- define "request-body-obj" -}} + {{- $method := .Method -}} + {{- $field := .Field -}} + {{$method.CamelName}}Req{{ if (and $method.RequestBodyField (not $field.IsPath)) }}.{{$method.RequestBodyField.PascalName}}{{end}}.{{$field.PascalName}} +{{- end -}} diff --git a/.gitattributes b/.gitattributes index ae10198bb..ecb5669ef 100755 --- a/.gitattributes +++ b/.gitattributes @@ -30,13 +30,14 @@ cmd/account/users/users.go linguist-generated=true cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true +cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go linguist-generated=true +cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go linguist-generated=true cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true -cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true @@ -48,6 +49,7 @@ cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true +cmd/workspace/credentials/credentials.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index d2a1d0c7d..59b8547be 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -214,7 +214,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos // Dashboards: Prefix for key, dashboard := range r.Dashboards { - if dashboard == nil || dashboard.CreateDashboardRequest == nil { + if dashboard == nil || dashboard.Dashboard == nil { diags = diags.Extend(diag.Errorf("dashboard %s s is not defined", key)) continue } diff --git 
a/bundle/config/mutator/configure_dashboard_defaults_test.go b/bundle/config/mutator/configure_dashboard_defaults_test.go index 4804b7159..2234f9a73 100644 --- a/bundle/config/mutator/configure_dashboard_defaults_test.go +++ b/bundle/config/mutator/configure_dashboard_defaults_test.go @@ -26,13 +26,13 @@ func TestConfigureDashboardDefaultsParentPath(t *testing.T) { "d1": { // Empty string is skipped. // See below for how it is set. - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ ParentPath: "", }, }, "d2": { // Non-empty string is skipped. - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ ParentPath: "already-set", }, }, diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index 61103de80..16b67dac8 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -89,7 +89,7 @@ func TestInitializeURLs(t *testing.T) { Dashboards: map[string]*resources.Dashboard{ "dashboard1": { ID: "01ef8d56871e1d50ae30ce7375e42478", - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "My special dashboard", }, }, diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index d76d2d8f3..b694f627a 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -126,7 +126,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, Dashboards: map[string]*resources.Dashboard{ "dashboard1": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "dashboard1", }, }, diff --git a/bundle/config/mutator/translate_paths_dashboards_test.go b/bundle/config/mutator/translate_paths_dashboards_test.go index c386f1bbe..5e4e69f5d 100644 --- a/bundle/config/mutator/translate_paths_dashboards_test.go +++ b/bundle/config/mutator/translate_paths_dashboards_test.go @@ -28,7 +28,7 @@ func TestTranslatePathsDashboards_FilePathRelativeSubDirectory(t *testing.T) { Resources: config.Resources{ Dashboards: map[string]*resources.Dashboard{ "dashboard": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "My Dashboard", }, FilePath: "../src/my_dashboard.lvdash.json", diff --git a/bundle/config/resources/dashboard.go b/bundle/config/resources/dashboard.go index 462dbc564..724b03393 100644 --- a/bundle/config/resources/dashboard.go +++ b/bundle/config/resources/dashboard.go @@ -17,7 +17,7 @@ type Dashboard struct { ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` URL string `json:"url,omitempty" bundle:"internal"` - *dashboards.CreateDashboardRequest + *dashboards.Dashboard // ========================= // === Additional fields === diff --git a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go index c13f800f7..25aee125f 100644 --- a/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go +++ b/bundle/deploy/terraform/check_dashboards_modified_remotely_test.go @@ -29,7 +29,7 @@ func mockDashboardBundle(t *testing.T) *bundle.Bundle { Resources: config.Resources{ Dashboards: map[string]*resources.Dashboard{ "dash1": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: 
"My Special Dashboard", }, }, diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index 3f69bbed4..6ed34d430 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -792,7 +792,7 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, Dashboards: map[string]*resources.Dashboard{ "test_dashboard": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "test_dashboard", }, }, @@ -951,12 +951,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, Dashboards: map[string]*resources.Dashboard{ "test_dashboard": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "test_dashboard", }, }, "test_dashboard_new": { - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "test_dashboard_new", }, }, diff --git a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go index 9cefbc10e..539ba21aa 100644 --- a/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_dashboard_test.go @@ -15,7 +15,7 @@ import ( func TestConvertDashboard(t *testing.T) { var src = resources.Dashboard{ - CreateDashboardRequest: &dashboards.CreateDashboardRequest{ + Dashboard: &dashboards.Dashboard{ DisplayName: "my dashboard", WarehouseId: "f00dcafe", ParentPath: "/some/path", diff --git a/bundle/schema/embed_test.go b/bundle/schema/embed_test.go index dcb381b83..e4b45baa5 100644 --- a/bundle/schema/embed_test.go +++ b/bundle/schema/embed_test.go @@ -59,9 +59,14 @@ func TestJsonSchema(t *testing.T) { } // Assert enum values are loaded - schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "catalog.MonitorCronSchedule") - assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "PAUSED") - assert.Contains(t, schedule.AnyOf[0].Properties["pause_status"].Enum, "UNPAUSED") + schedule := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "pipelines.RestartWindow") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "MONDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "TUESDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "WEDNESDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "THURSDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "FRIDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SATURDAY") + assert.Contains(t, schedule.AnyOf[0].Properties["days_of_week"].Enum, "SUNDAY") providers := walk(s.Definitions, "github.com", "databricks", "databricks-sdk-go", "service", "jobs.GitProvider") assert.Contains(t, providers.Enum, "gitHub") diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 62e5fe6d8..dc0d7f953 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -185,6 +185,14 @@ { "type": "object", "properties": { + "create_time": { + "description": "The timestamp of when the dashboard was created.", + "$ref": "#/$defs/string" + }, + "dashboard_id": { + "description": "UUID identifying the dashboard.", + "$ref": "#/$defs/string" + }, "display_name": { "description": "The display name of the dashboard.", "$ref": "#/$defs/string" @@ -192,13 +200,25 @@ 
"embed_credentials": { "$ref": "#/$defs/bool" }, + "etag": { + "description": "The etag for the dashboard. Can be optionally provided on updates to ensure that the dashboard\nhas not been modified since the last read.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, "file_path": { "$ref": "#/$defs/string" }, + "lifecycle_state": { + "description": "The state of the dashboard resource. Used for tracking trashed status.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/dashboards.LifecycleState" + }, "parent_path": { "description": "The workspace path of the folder containing the dashboard. Includes leading slash and no\ntrailing slash.\nThis field is excluded in List Dashboards responses.", "$ref": "#/$defs/string" }, + "path": { + "description": "The workspace path of the dashboard asset, including the file name.\nExported dashboards always have the file extension `.lvdash.json`.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, "permissions": { "$ref": "#/$defs/slice/github.com/databricks/cli/bundle/config/resources.Permission" }, @@ -206,15 +226,16 @@ "description": "The contents of the dashboard in serialized string form.\nThis field is excluded in List Dashboards responses.\nUse the [get dashboard API](https://docs.databricks.com/api/workspace/lakeview/get)\nto retrieve an example response, which includes the `serialized_dashboard` field.\nThis field provides the structure of the JSON string that represents the dashboard's\nlayout and components.", "$ref": "#/$defs/interface" }, + "update_time": { + "description": "The timestamp of when the dashboard was last updated by the user.\nThis field is excluded in List Dashboards responses.", + "$ref": "#/$defs/string" + }, "warehouse_id": { "description": "The warehouse ID used to run the dashboard.", "$ref": "#/$defs/string" } }, - "additionalProperties": false, - "required": [ - "display_name" - ] + "additionalProperties": false }, { "type": "string", @@ -551,7 +572,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.Filters" }, "gateway_definition": { - "description": "The definition of a gateway pipeline to support CDC.", + "description": "The definition of a gateway pipeline to support change data capture.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.IngestionGatewayPipelineDefinition" }, "id": { @@ -581,6 +602,10 @@ "description": "Whether Photon is enabled for this pipeline.", "$ref": "#/$defs/bool" }, + "restart_window": { + "description": "Restart window of this pipeline.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindow" + }, "schema": { "description": "The default schema (database) where tables are read from or published to. The presence of this field implies that the pipeline is in direct publishing mode.", "$ref": "#/$defs/string" @@ -1289,11 +1314,7 @@ "properties": { "pause_status": { "description": "Read only field that indicates whether a schedule is paused or not.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus", - "enum": [ - "UNPAUSED", - "PAUSED" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorCronSchedulePauseStatus" }, "quartz_cron_expression": { "description": "The expression that determines when to run the monitor. 
See [examples](https://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html).\n", @@ -1317,7 +1338,12 @@ ] }, "catalog.MonitorCronSchedulePauseStatus": { - "type": "string" + "type": "string", + "description": "Read only field that indicates whether a schedule is paused or not.", + "enum": [ + "UNPAUSED", + "PAUSED" + ] }, "catalog.MonitorDataClassificationConfig": { "anyOf": [ @@ -1382,11 +1408,7 @@ }, "problem_type": { "description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType", - "enum": [ - "PROBLEM_TYPE_CLASSIFICATION", - "PROBLEM_TYPE_REGRESSION" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorInferenceLogProblemType" }, "timestamp_col": { "description": "Column that contains the timestamps of requests. The column must be one of the following:\n- A ``TimestampType`` column\n- A column whose values can be converted to timestamps through the pyspark\n ``to_timestamp`` [function](https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html).\n", @@ -1409,7 +1431,12 @@ ] }, "catalog.MonitorInferenceLogProblemType": { - "type": "string" + "type": "string", + "description": "Problem type the model aims to solve. Determines the type of model-quality metrics that will be computed.", + "enum": [ + "PROBLEM_TYPE_CLASSIFICATION", + "PROBLEM_TYPE_REGRESSION" + ] }, "catalog.MonitorMetric": { "anyOf": [ @@ -1434,12 +1461,7 @@ }, "type": { "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType", - "enum": [ - "CUSTOM_METRIC_TYPE_AGGREGATE", - "CUSTOM_METRIC_TYPE_DERIVED", - "CUSTOM_METRIC_TYPE_DRIFT" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorMetricType" } }, "additionalProperties": false, @@ -1458,7 +1480,13 @@ ] }, "catalog.MonitorMetricType": { - "type": "string" + "type": "string", + "description": "Can only be one of ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"``, ``\"CUSTOM_METRIC_TYPE_DERIVED\"``, or ``\"CUSTOM_METRIC_TYPE_DRIFT\"``.\nThe ``\"CUSTOM_METRIC_TYPE_AGGREGATE\"`` and ``\"CUSTOM_METRIC_TYPE_DERIVED\"`` metrics\nare computed on a single table, whereas the ``\"CUSTOM_METRIC_TYPE_DRIFT\"`` compare metrics across\nbaseline and input table, or across the two consecutive time windows.\n- CUSTOM_METRIC_TYPE_AGGREGATE: only depend on the existing columns in your table\n- CUSTOM_METRIC_TYPE_DERIVED: depend on previously computed aggregate metrics\n- CUSTOM_METRIC_TYPE_DRIFT: depend on previously computed aggregate or derived metrics\n", + "enum": [ + "CUSTOM_METRIC_TYPE_AGGREGATE", + "CUSTOM_METRIC_TYPE_DERIVED", + "CUSTOM_METRIC_TYPE_DRIFT" + ] }, "catalog.MonitorNotifications": { 
"anyOf": [ @@ -2325,6 +2353,13 @@ } ] }, + "dashboards.LifecycleState": { + "type": "string", + "enum": [ + "ACTIVE", + "TRASHED" + ] + }, "jobs.Condition": { "type": "string", "enum": [ @@ -3102,7 +3137,7 @@ "$ref": "#/$defs/slice/string" }, "jar_params": { - "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "$ref": "#/$defs/slice/string" }, "job_id": { @@ -3436,11 +3471,11 @@ "type": "object", "properties": { "condition_task": { - "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", + "description": "The task evaluates a condition that can be used to control the execution of other tasks when the `condition_task` field is present.\nThe condition task does not require a cluster to execute and does not support retries or notifications.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ConditionTask" }, "dbt_task": { - "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "description": "The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.DbtTask" }, "depends_on": { @@ -3468,7 +3503,7 @@ "$ref": "#/$defs/string" }, "for_each_task": { - "description": "If for_each_task, indicates that this task must execute the nested task within it.", + "description": "The task executes a nested task for every input provided when the `for_each_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.ForEachTask" }, "health": { @@ -3495,7 +3530,7 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/compute.ClusterSpec" }, "notebook_task": { - "description": "If notebook_task, indicates that this task must run a notebook. 
This field may not be specified in conjunction with spark_jar_task.", + "description": "The task runs a notebook when the `notebook_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.NotebookTask" }, "notification_settings": { "description": "Optional notification settings that are used when sending notifications to each of the email_notifications and webhook_notifications for this task.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.TaskNotificationSettings" }, "pipeline_task": { - "description": "If pipeline_task, indicates that this task must execute a Pipeline.", + "description": "The task triggers a pipeline update when the `pipeline_task` field is present. Only pipelines configured to use triggered mode are supported.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PipelineTask" }, "python_wheel_task": { - "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", + "description": "The task runs a Python wheel when the `python_wheel_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.PythonWheelTask" }, "retry_on_timeout": { @@ -3519,23 +3554,23 @@ "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunIf" }, "run_job_task": { - "description": "If run_job_task, indicates that this task must execute another job.", + "description": "The task triggers another job when the `run_job_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.RunJobTask" }, "spark_jar_task": { - "description": "If spark_jar_task, indicates that this task must run a JAR.", + "description": "The task runs a JAR when the `spark_jar_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkJarTask" }, "spark_python_task": { - "description": "If spark_python_task, indicates that this task must run a Python file.", + "description": "The task runs a Python file when the `spark_python_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkPythonTask" }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", + "description": "(Legacy) The task runs the spark-submit script when the `spark_submit_task` field is present. This task can run only on new clusters and is not compatible with serverless compute.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services).
You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SparkSubmitTask" }, "sql_task": { - "description": "If sql_task, indicates that this job must execute a SQL task.", + "description": "The task runs a SQL query or file, or it refreshes a SQL alert or a legacy SQL dashboard when the `sql_task` field is present.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/jobs.SqlTask" }, "task_key": { @@ -3821,12 +3856,7 @@ }, "status": { "description": "Current status of `model_version`", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus", - "enum": [ - "PENDING_REGISTRATION", - "FAILED_REGISTRATION", - "READY" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/ml.ModelVersionStatus" }, "status_message": { "description": "Details on current `status`, if it is pending or failed.", @@ -3854,7 +3884,13 @@ ] }, "ml.ModelVersionStatus": { - "type": "string" + "type": "string", + "description": "Current status of `model_version`", + "enum": [ + "PENDING_REGISTRATION", + "FAILED_REGISTRATION", + "READY" + ] }, "ml.ModelVersionTag": { "anyOf": [ @@ -3951,15 +3987,15 @@ "type": "object", "properties": { "report": { - "description": "Select tables from a specific source report.", + "description": "Select a specific source report.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.ReportSpec" }, "schema": { - "description": "Select tables from a specific source schema.", + "description": "Select all tables from a specific source schema.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.SchemaSpec" }, "table": { - "description": "Select tables from a specific source table.", + "description": "Select a specific source table.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpec" } }, @@ -3977,7 +4013,11 @@ "type": "object", "properties": { "connection_id": { - "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source.", + "description": "[Deprecated, use connection_name instead] Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", + "$ref": "#/$defs/string" + }, + "connection_name": { + "description": "Immutable. The Unity Catalog connection that this gateway pipeline uses to communicate with the source.", "$ref": "#/$defs/string" }, "gateway_storage_catalog": { @@ -4007,11 +4047,11 @@ "type": "object", "properties": { "connection_name": { - "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name.", + "description": "Immutable. The Unity Catalog connection that this ingestion pipeline uses to communicate with the source. This is used with connectors for applications like Salesforce, Workday, and so on.", "$ref": "#/$defs/string" }, "ingestion_gateway_id": { - "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name.", + "description": "Immutable. Identifier for the gateway that is used by this ingestion pipeline to communicate with the source database. 
This is used with connectors to databases like SQL Server.", "$ref": "#/$defs/string" }, "objects": { @@ -4188,11 +4228,7 @@ }, "mode": { "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode", - "enum": [ - "ENHANCED", - "LEGACY" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.PipelineClusterAutoscaleMode" } }, "additionalProperties": false, @@ -4208,7 +4244,12 @@ ] }, "pipelines.PipelineClusterAutoscaleMode": { - "type": "string" + "type": "string", + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n", + "enum": [ + "ENHANCED", + "LEGACY" + ] }, "pipelines.PipelineDeployment": { "anyOf": [ @@ -4320,6 +4361,47 @@ } ] }, + "pipelines.RestartWindow": { + "anyOf": [ + { + "type": "object", + "properties": { + "days_of_week": { + "description": "Days of week in which the restart is allowed to happen (within a five-hour window starting at start_hour).\nIf not specified all days of the week will be used.", + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.RestartWindowDaysOfWeek", + "enum": [ + "MONDAY", + "TUESDAY", + "WEDNESDAY", + "THURSDAY", + "FRIDAY", + "SATURDAY", + "SUNDAY" + ] + }, + "start_hour": { + "description": "An integer between 0 and 23 denoting the start hour for the restart window in the 24-hour day.\nContinuous pipeline restart is triggered only within a five-hour window starting at this hour.", + "$ref": "#/$defs/int" + }, + "time_zone_id": { + "description": "Time zone id of restart window. See https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html for details.\nIf not specified, UTC will be used.", + "$ref": "#/$defs/string" + } + }, + "additionalProperties": false, + "required": [ + "start_hour" + ] + }, + { + "type": "string", + "pattern": "\\$\\{(var(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)+)\\}" + } + ] + }, + "pipelines.RestartWindowDaysOfWeek": { + "type": "string" + }, "pipelines.SchemaSpec": { "anyOf": [ { @@ -4411,11 +4493,7 @@ }, "scd_type": { "description": "The SCD type to use to ingest the table.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType", - "enum": [ - "SCD_TYPE_1", - "SCD_TYPE_2" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/pipelines.TableSpecificConfigScdType" }, "sequence_by": { "description": "The column names specifying the logical order of events in the source data. 
Delta Live Tables uses this sequencing to handle change events that arrive out of order.", @@ -4431,7 +4509,12 @@ ] }, "pipelines.TableSpecificConfigScdType": { - "type": "string" + "type": "string", + "description": "The SCD type to use to ingest the table.", + "enum": [ + "SCD_TYPE_1", + "SCD_TYPE_2" + ] }, "serving.Ai21LabsConfig": { "anyOf": [ @@ -4520,11 +4603,7 @@ "properties": { "behavior": { "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior", - "enum": [ - "NONE", - "BLOCK" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayGuardrailPiiBehaviorBehavior" } }, "additionalProperties": false, @@ -4539,7 +4618,12 @@ ] }, "serving.AiGatewayGuardrailPiiBehaviorBehavior": { - "type": "string" + "type": "string", + "description": "Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is set for the input guardrail and the request contains PII, the request is not sent to the model server and 400 status code is returned; if 'BLOCK' is set for the output guardrail and the model response contains PII, the PII info in the response is redacted and 400 status code is returned.", + "enum": [ + "NONE", + "BLOCK" + ] }, "serving.AiGatewayGuardrails": { "anyOf": [ @@ -4604,18 +4688,11 @@ }, "key": { "description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey", - "enum": [ - "user", - "endpoint" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitKey" }, "renewal_period": { "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod", - "enum": [ - "minute" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AiGatewayRateLimitRenewalPeriod" } }, "additionalProperties": false, @@ -4631,10 +4708,19 @@ ] }, "serving.AiGatewayRateLimitKey": { - "type": "string" + "type": "string", + "description": "Key field for a rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "enum": [ + "user", + "endpoint" + ] }, "serving.AiGatewayRateLimitRenewalPeriod": { - "type": "string" + "type": "string", + "description": "Renewal period field for a rate limit. Currently, only 'minute' is supported.", + "enum": [ + "minute" + ] }, "serving.AiGatewayUsageTrackingConfig": { "anyOf": [ @@ -4681,13 +4767,7 @@ }, "bedrock_provider": { "description": "The underlying provider in Amazon Bedrock. 
Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider", - "enum": [ - "anthropic", - "cohere", - "ai21labs", - "amazon" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.AmazonBedrockConfigBedrockProvider" } }, "additionalProperties": false, @@ -4703,7 +4783,14 @@ ] }, "serving.AmazonBedrockConfigBedrockProvider": { - "type": "string" + "type": "string", + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon.", + "enum": [ + "anthropic", + "cohere", + "ai21labs", + "amazon" + ] }, "serving.AnthropicConfig": { "anyOf": [ @@ -4910,17 +4997,7 @@ }, "provider": { "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider", - "enum": [ - "ai21labs", - "anthropic", - "amazon-bedrock", - "cohere", - "databricks-model-serving", - "google-cloud-vertex-ai", - "openai", - "palm" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ExternalModelProvider" }, "task": { "description": "The task type of the external model.", @@ -4941,7 +5018,18 @@ ] }, "serving.ExternalModelProvider": { - "type": "string" + "type": "string", + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n", + "enum": [ + "ai21labs", + "anthropic", + "amazon-bedrock", + "cohere", + "databricks-model-serving", + "google-cloud-vertex-ai", + "openai", + "palm" + ] }, "serving.GoogleCloudVertexAiConfig": { "anyOf": [ @@ -5047,18 +5135,11 @@ }, "key": { "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey", - "enum": [ - "user", - "endpoint" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitKey" }, "renewal_period": { "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod", - "enum": [ - "minute" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.RateLimitRenewalPeriod" } }, "additionalProperties": false, @@ -5074,10 +5155,19 @@ ] }, "serving.RateLimitKey": { - "type": "string" + "type": "string", + "description": "Key field for a serving endpoint rate limit. Currently, only 'user' and 'endpoint' are supported, with 'endpoint' being the default if not specified.", + "enum": [ + "user", + "endpoint" + ] }, "serving.RateLimitRenewalPeriod": { - "type": "string" + "type": "string", + "description": "Renewal period field for a serving endpoint rate limit. Currently, only 'minute' is supported.", + "enum": [ + "minute" + ] }, "serving.Route": { "anyOf": [ @@ -5202,23 +5292,11 @@ }, "workload_size": { "description": "The workload size of the served model. 
The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize", - "enum": [ - "Small", - "Medium", - "Large" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadSize" }, "workload_type": { "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n", - "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType", - "enum": [ - "CPU", - "GPU_SMALL", - "GPU_MEDIUM", - "GPU_LARGE", - "MULTIGPU_MEDIUM" - ] + "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/serving.ServedModelInputWorkloadType" } }, "additionalProperties": false, @@ -5235,10 +5313,24 @@ ] }, "serving.ServedModelInputWorkloadSize": { - "type": "string" + "type": "string", + "description": "The workload size of the served model. The workload size corresponds to a range of provisioned concurrency that the compute will autoscale between.\nA single unit of provisioned concurrency can process one request at a time.\nValid workload sizes are \"Small\" (4 - 4 provisioned concurrency), \"Medium\" (8 - 16 provisioned concurrency), and \"Large\" (16 - 64 provisioned concurrency).\nIf scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size will be 0.\n", + "enum": [ + "Small", + "Medium", + "Large" + ] }, "serving.ServedModelInputWorkloadType": { - "type": "string" + "type": "string", + "description": "The workload type of the served model. The workload type selects which type of compute to use in the endpoint. The default value for this parameter is\n\"CPU\". 
For deep learning workloads, GPU acceleration is available by selecting workload types like GPU_SMALL and others.\nSee the available [GPU types](https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types).\n", + "enum": [ + "CPU", + "GPU_SMALL", + "GPU_MEDIUM", + "GPU_LARGE", + "MULTIGPU_MEDIUM" + ] }, "serving.TrafficConfig": { "anyOf": [ diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 47cfa4b08..f7dc4e88e 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -191,6 +191,8 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `An opaque page token which was the next_page_token in the response of the previous request to list the secrets for this service principal.`) + cmd.Use = "list SERVICE_PRINCIPAL_ID" cmd.Short = `List service principal secrets.` cmd.Long = `List service principal secrets. diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 82d3d7db9..bee01eb41 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -81,6 +81,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.DeploymentName, "deployment-name", createReq.DeploymentName, `The deployment name defines part of the subdomain for the workspace.`) // TODO: complex arg: gcp_managed_network_config // TODO: complex arg: gke_config + cmd.Flags().BoolVar(&createReq.IsNoPublicIpEnabled, "is-no-public-ip-enabled", createReq.IsNoPublicIpEnabled, `Whether no public IP is enabled for the workspace.`) cmd.Flags().StringVar(&createReq.Location, "location", createReq.Location, `The Google Cloud region of the workspace data plane in your Google account.`) cmd.Flags().StringVar(&createReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", createReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&createReq.NetworkId, "network-id", createReq.NetworkId, ``) @@ -420,6 +421,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) + cmd.Flags().StringVar(&updateReq.PrivateAccessSettingsId, "private-access-settings-id", updateReq.PrivateAccessSettingsId, `The ID of the workspace's private access settings configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) diff --git 
a/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go new file mode 100755 index 000000000..b1adf6103 --- /dev/null +++ b/cmd/workspace/aibi-dashboard-embedding-access-policy/aibi-dashboard-embedding-access-policy.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package aibi_dashboard_embedding_access_policy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "aibi-dashboard-embedding-access-policy", + Short: `Controls whether AI/BI published dashboard embedding is enabled, conditionally enabled, or disabled at the workspace level.`, + Long: `Controls whether AI/BI published dashboard embedding is enabled, conditionally + enabled, or disabled at the workspace level. By default, this setting is + conditionally enabled (ALLOW_APPROVED_DOMAINS).`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAibiDashboardEmbeddingAccessPolicySettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Retrieve the AI/BI dashboard embedding access policy.` + cmd.Long = `Retrieve the AI/BI dashboard embedding access policy. + + Retrieves the AI/BI dashboard embedding access policy. The default setting is + ALLOW_APPROVED_DOMAINS, permitting AI/BI dashboards to be embedded on approved + domains.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
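+//
+// As an illustrative sketch (hypothetical, not generated output), a curated
+// file in this directory could register an override from init():
+//
+//	func init() {
+//		updateOverrides = append(updateOverrides, func(cmd *cobra.Command, req *settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) {
+//			// Hypothetical tweak: shorten the generated help text.
+//			cmd.Short = `Update the embedding access policy.`
+//		})
+//	}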
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAibiDashboardEmbeddingAccessPolicySettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the AI/BI dashboard embedding access policy.` + cmd.Long = `Update the AI/BI dashboard embedding access policy. + + Updates the AI/BI dashboard embedding access policy at the workspace level.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AibiDashboardEmbeddingAccessPolicy().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AibiDashboardEmbeddingAccessPolicy diff --git a/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go new file mode 100755 index 000000000..481197460 --- /dev/null +++ b/cmd/workspace/aibi-dashboard-embedding-approved-domains/aibi-dashboard-embedding-approved-domains.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package aibi_dashboard_embedding_approved_domains + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "aibi-dashboard-embedding-approved-domains", + Short: `Controls the list of domains approved to host the embedded AI/BI dashboards.`, + Long: `Controls the list of domains approved to host the embedded AI/BI dashboards. + The approved domains list can't be mutated when the current access policy is + not set to ALLOW_APPROVED_DOMAINS.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
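+//
+// For a concrete instance of this override mechanism, see the manually
+// curated cmd/workspace/apps/overrides.go added later in this patch.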
+var getOverrides []func( + *cobra.Command, + *settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAibiDashboardEmbeddingApprovedDomainsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Retrieve the list of domains approved to host embedded AI/BI dashboards.` + cmd.Long = `Retrieve the list of domains approved to host embedded AI/BI dashboards. + + Retrieves the list of domains approved to host embedded AI/BI dashboards.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the list of domains approved to host embedded AI/BI dashboards.` + cmd.Long = `Update the list of domains approved to host embedded AI/BI dashboards. + + Updates the list of domains approved to host embedded AI/BI dashboards. This + update will fail if the current workspace access policy is not + ALLOW_APPROVED_DOMAINS.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&updateReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AibiDashboardEmbeddingApprovedDomains().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
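+	// Each registered override receives both the command and the request
+	// struct, and may mutate either before the command is returned.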
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AibiDashboardEmbeddingApprovedDomains diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 9331ddc2e..514da697b 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -67,6 +67,7 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq apps.CreateAppRequest + createReq.App = &apps.App{} var createJson flags.JsonFlag var createSkipWait bool @@ -77,7 +78,11 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) + // TODO: complex arg: active_deployment + // TODO: complex arg: app_status + // TODO: complex arg: compute_status + cmd.Flags().StringVar(&createReq.App.Description, "description", createReq.App.Description, `The description of the app.`) + // TODO: complex arg: pending_deployment // TODO: array: resources cmd.Use = "create NAME" @@ -110,7 +115,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.App) if diags.HasError() { return diags.Error() } @@ -122,7 +127,7 @@ func newCreate() *cobra.Command { } } if !cmd.Flags().Changed("json") { - createReq.Name = args[0] + createReq.App.Name = args[0] } wait, err := w.Apps.Create(ctx, createReq) @@ -234,6 +239,7 @@ func newDeploy() *cobra.Command { cmd := &cobra.Command{} var deployReq apps.CreateAppDeploymentRequest + deployReq.AppDeployment = &apps.AppDeployment{} var deployJson flags.JsonFlag var deploySkipWait bool @@ -244,9 +250,11 @@ func newDeploy() *cobra.Command { // TODO: short flags cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&deployReq.DeploymentId, "deployment-id", deployReq.DeploymentId, `The unique id of the deployment.`) - cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) - cmd.Flags().StringVar(&deployReq.SourceCodePath, "source-code-path", deployReq.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`) + // TODO: complex arg: deployment_artifacts + cmd.Flags().StringVar(&deployReq.AppDeployment.DeploymentId, "deployment-id", deployReq.AppDeployment.DeploymentId, `The unique id of the deployment.`) + cmd.Flags().Var(&deployReq.AppDeployment.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) + cmd.Flags().StringVar(&deployReq.AppDeployment.SourceCodePath, "source-code-path", deployReq.AppDeployment.SourceCodePath, `The workspace file system path of the source code used to create the app deployment.`) + // TODO: complex arg: status cmd.Use = "deploy APP_NAME" cmd.Short = `Create an app deployment.` @@ -270,7 +278,7 @@ func newDeploy() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := deployJson.Unmarshal(&deployReq) + diags := deployJson.Unmarshal(&deployReq.AppDeployment) if diags.HasError() { return diags.Error() } @@ -692,8 +700,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set app permissions.` cmd.Long = `Set app permissions. - Sets permissions on an app. 
Apps can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: APP_NAME: The app for which to get or manage permissions.` @@ -920,28 +929,41 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq apps.UpdateAppRequest + updateReq.App = &apps.App{} var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`) + // TODO: complex arg: active_deployment + // TODO: complex arg: app_status + // TODO: complex arg: compute_status + cmd.Flags().StringVar(&updateReq.App.Description, "description", updateReq.App.Description, `The description of the app.`) + // TODO: complex arg: pending_deployment // TODO: array: resources - cmd.Use = "update NAME" + cmd.Use = "update NAME NAME" cmd.Short = `Update an app.` cmd.Long = `Update an app. Updates the app with the supplied name. Arguments: + NAME: The name of the app. NAME: The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) return check(cmd, args) } @@ -951,7 +973,7 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) + diags := updateJson.Unmarshal(&updateReq.App) if diags.HasError() { return diags.Error() } @@ -963,6 +985,9 @@ } } updateReq.Name = args[0] + if !cmd.Flags().Changed("json") { + updateReq.App.Name = args[1] + } response, err := w.Apps.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go new file mode 100644 index 000000000..debd9f5a6 --- /dev/null +++ b/cmd/workspace/apps/overrides.go @@ -0,0 +1,59 @@ +package apps + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/apps" + "github.com/spf13/cobra" +) + +// We override the apps.Update command because currently genkit does not support +// a way to identify that the path field (such as name) matches the field in the request body. +// As a result, genkit generates a command with two identical required fields, update NAME NAME. +// This override should be removed when genkit supports this. +func updateOverride(cmd *cobra.Command, req *apps.UpdateAppRequest) { + cmd.Use = "update NAME" + cmd.Long = `Update an app. + + Updates the app with the supplied name. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens.
It must be unique within the workspace.` + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + updateJson := cmd.Flag("json").Value.(*flags.JsonFlag) + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateJson.Unmarshal(&req.App) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + req.Name = args[0] + response, err := w.Apps.Update(ctx, *req) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } +} + +func init() { + updateOverrides = append(updateOverrides, updateOverride) +} diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go deleted file mode 100755 index 72560b846..000000000 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ /dev/null @@ -1,385 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package clean_rooms - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/sharing" - "github.com/spf13/cobra" -) - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var cmdOverrides []func(*cobra.Command) - -func New() *cobra.Command { - cmd := &cobra.Command{ - Use: "clean-rooms", - Short: `A clean room is a secure, privacy-protecting environment where two or more parties can share sensitive enterprise data, including customer data, for measurements, insights, activation and other use cases.`, - Long: `A clean room is a secure, privacy-protecting environment where two or more - parties can share sensitive enterprise data, including customer data, for - measurements, insights, activation and other use cases. - - To create clean rooms, you must be a metastore admin or a user with the - **CREATE_CLEAN_ROOM** privilege.`, - GroupID: "sharing", - Annotations: map[string]string{ - "package": "sharing", - }, - - // This service is being previewed; hide from help output. - Hidden: true, - } - - // Add methods - cmd.AddCommand(newCreate()) - cmd.AddCommand(newDelete()) - cmd.AddCommand(newGet()) - cmd.AddCommand(newList()) - cmd.AddCommand(newUpdate()) - - // Apply optional overrides to this command. - for _, fn := range cmdOverrides { - fn(cmd) - } - - return cmd -} - -// start create command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createOverrides []func( - *cobra.Command, - *sharing.CreateCleanRoom, -) - -func newCreate() *cobra.Command { - cmd := &cobra.Command{} - - var createReq sharing.CreateCleanRoom - var createJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) - - cmd.Use = "create" - cmd.Short = `Create a clean room.` - cmd.Long = `Create a clean room. - - Creates a new clean room with specified colaborators. 
The caller must be a - metastore admin or have the **CREATE_CLEAN_ROOM** privilege on the metastore.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") - } - - response, err := w.CleanRooms.Create(ctx, createReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createOverrides { - fn(cmd, &createReq) - } - - return cmd -} - -// start delete command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteOverrides []func( - *cobra.Command, - *sharing.DeleteCleanRoomRequest, -) - -func newDelete() *cobra.Command { - cmd := &cobra.Command{} - - var deleteReq sharing.DeleteCleanRoomRequest - - // TODO: short flags - - cmd.Use = "delete NAME" - cmd.Short = `Delete a clean room.` - cmd.Long = `Delete a clean room. - - Deletes a data object clean room from the metastore. The caller must be an - owner of the clean room. - - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - deleteReq.Name = args[0] - - err = w.CleanRooms.Delete(ctx, deleteReq) - if err != nil { - return err - } - return nil - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteOverrides { - fn(cmd, &deleteReq) - } - - return cmd -} - -// start get command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getOverrides []func( - *cobra.Command, - *sharing.GetCleanRoomRequest, -) - -func newGet() *cobra.Command { - cmd := &cobra.Command{} - - var getReq sharing.GetCleanRoomRequest - - // TODO: short flags - - cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`) - - cmd.Use = "get NAME" - cmd.Short = `Get a clean room.` - cmd.Long = `Get a clean room. - - Gets a data object clean room from the metastore. The caller must be a - metastore admin or the owner of the clean room. 
- - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - getReq.Name = args[0] - - response, err := w.CleanRooms.Get(ctx, getReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getOverrides { - fn(cmd, &getReq) - } - - return cmd -} - -// start list command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var listOverrides []func( - *cobra.Command, - *sharing.ListCleanRoomsRequest, -) - -func newList() *cobra.Command { - cmd := &cobra.Command{} - - var listReq sharing.ListCleanRoomsRequest - - // TODO: short flags - - cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of clean rooms to return.`) - cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) - - cmd.Use = "list" - cmd.Short = `List clean rooms.` - cmd.Long = `List clean rooms. - - Gets an array of data object clean rooms from the metastore. The caller must - be a metastore admin or the owner of the clean room. There is no guarantee of - a specific ordering of the elements in the array.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - response := w.CleanRooms.List(ctx, listReq) - return cmdio.RenderIterator(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range listOverrides { - fn(cmd, &listReq) - } - - return cmd -} - -// start update command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var updateOverrides []func( - *cobra.Command, - *sharing.UpdateCleanRoom, -) - -func newUpdate() *cobra.Command { - cmd := &cobra.Command{} - - var updateReq sharing.UpdateCleanRoom - var updateJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - // TODO: array: catalog_updates - cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) - cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`) - - cmd.Use = "update NAME" - cmd.Short = `Update a clean room.` - cmd.Long = `Update a clean room. - - Updates the clean room with the changes and data objects in the request. 
The - caller must be the owner of the clean room or a metastore admin. - - When the caller is a metastore admin, only the __owner__ field can be updated. - - In the case that the clean room name is changed **updateCleanRoom** requires - that the caller is both the clean room owner and a metastore admin. - - For each table that is added through this method, the clean room owner must - also have **SELECT** privilege on the table. The privilege must be maintained - indefinitely for recipients to be able to access the table. Typically, you - should use a group as the clean room owner. - - Table removals through **update** do not require additional privileges. - - Arguments: - NAME: The name of the clean room.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) - if diags.HasError() { - return diags.Error() - } - if len(diags) > 0 { - err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) - if err != nil { - return err - } - } - } - updateReq.Name = args[0] - - response, err := w.CleanRooms.Update(ctx, updateReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateOverrides { - fn(cmd, &updateReq) - } - - return cmd -} - -// end service CleanRooms diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index b34dd53db..9e50065f9 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -634,8 +634,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set cluster policy permissions.` cmd.Long = `Set cluster policy permissions. - Sets permissions on a cluster policy. Cluster policies can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: CLUSTER_POLICY_ID: The cluster policy for which to get or manage permissions.` diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index 0ed454de2..db788753b 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -512,7 +512,7 @@ func newEdit() *cobra.Command { Clusters created by the Databricks Jobs service cannot be edited. Arguments: - CLUSTER_ID: ID of the cluser + CLUSTER_ID: ID of the cluster SPARK_VERSION: The Spark version of the cluster, e.g. 3.3.x-scala2.11. A list of available Spark versions can be retrieved by using the :method:clusters/sparkVersions API call.` @@ -1504,8 +1504,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set cluster permissions.` cmd.Long = `Set cluster permissions. - Sets permissions on a cluster. Clusters can inherit permissions from their - root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. 
Objects can inherit + permissions from their root object. Arguments: CLUSTER_ID: The cluster for which to get or manage permissions.` diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 3fe5b2686..9cb3cca9e 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -8,7 +8,6 @@ import ( apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" - clean_rooms "github.com/databricks/cli/cmd/workspace/clean-rooms" cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" @@ -17,6 +16,7 @@ import ( consumer_listings "github.com/databricks/cli/cmd/workspace/consumer-listings" consumer_personalization_requests "github.com/databricks/cli/cmd/workspace/consumer-personalization-requests" consumer_providers "github.com/databricks/cli/cmd/workspace/consumer-providers" + credentials "github.com/databricks/cli/cmd/workspace/credentials" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" @@ -98,7 +98,6 @@ func All() []*cobra.Command { out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) - out = append(out, clean_rooms.New()) out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) @@ -107,6 +106,7 @@ func All() []*cobra.Command { out = append(out, consumer_listings.New()) out = append(out, consumer_personalization_requests.New()) out = append(out, consumer_providers.New()) + out = append(out, credentials.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go new file mode 100755 index 000000000..869df0628 --- /dev/null +++ b/cmd/workspace/credentials/credentials.go @@ -0,0 +1,545 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package credentials + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "credentials", + Short: `A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant.`, + Long: `A credential represents an authentication and authorization mechanism for + accessing services on your cloud tenant. Each credential is subject to Unity + Catalog access-control policies that control which users and groups can access + the credential. + + To create credentials, you must be a Databricks account admin or have the + CREATE SERVICE CREDENTIAL privilege. 
The user who creates the credential can + delegate ownership to another user or group to manage permissions on it`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreateCredential()) + cmd.AddCommand(newDeleteCredential()) + cmd.AddCommand(newGenerateTemporaryServiceCredential()) + cmd.AddCommand(newGetCredential()) + cmd.AddCommand(newListCredentials()) + cmd.AddCommand(newUpdateCredential()) + cmd.AddCommand(newValidateCredential()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createCredentialOverrides []func( + *cobra.Command, + *catalog.CreateCredentialRequest, +) + +func newCreateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var createCredentialReq catalog.CreateCredentialRequest + var createCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`) + cmd.Flags().StringVar(&createCredentialReq.Name, "name", createCredentialReq.Name, `The credential name.`) + cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE]`) + cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`) + + cmd.Use = "create-credential" + cmd.Short = `Create a credential.` + cmd.Long = `Create a credential. + + Creates a new credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := createCredentialJson.Unmarshal(&createCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.CreateCredential(ctx, createCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createCredentialOverrides { + fn(cmd, &createCredentialReq) + } + + return cmd +} + +// start delete-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
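// For illustration, a hypothetical hand-written override for the delete
// command (not part of this generated file; the body below is an assumption)
// could be appended from such a curated file:
//
//	func init() {
//		deleteCredentialOverrides = append(deleteCredentialOverrides,
//			func(cmd *cobra.Command, req *catalog.DeleteCredentialRequest) {
//				// Assumption: make forced deletion the default for this build.
//				req.Force = true
//			})
//	}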
+var deleteCredentialOverrides []func( + *cobra.Command, + *catalog.DeleteCredentialRequest, +) + +func newDeleteCredential() *cobra.Command { + cmd := &cobra.Command{} + + var deleteCredentialReq catalog.DeleteCredentialRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services.`) + + cmd.Use = "delete-credential NAME_ARG" + cmd.Short = `Delete a credential.` + cmd.Long = `Delete a credential. + + Deletes a credential from the metastore. The caller must be an owner of the + credential. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteCredentialReq.NameArg = args[0] + + err = w.Credentials.DeleteCredential(ctx, deleteCredentialReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteCredentialOverrides { + fn(cmd, &deleteCredentialReq) + } + + return cmd +} + +// start generate-temporary-service-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var generateTemporaryServiceCredentialOverrides []func( + *cobra.Command, + *catalog.GenerateTemporaryServiceCredentialRequest, +) + +func newGenerateTemporaryServiceCredential() *cobra.Command { + cmd := &cobra.Command{} + + var generateTemporaryServiceCredentialReq catalog.GenerateTemporaryServiceCredentialRequest + var generateTemporaryServiceCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: azure_options + cmd.Flags().StringVar(&generateTemporaryServiceCredentialReq.CredentialName, "credential-name", generateTemporaryServiceCredentialReq.CredentialName, `The name of the service credential used to generate a temporary credential.`) + + cmd.Use = "generate-temporary-service-credential" + cmd.Short = `Generate a temporary service credential.` + cmd.Long = `Generate a temporary service credential. + + Returns a set of temporary credentials generated using the specified service + credential. 
The caller must be a metastore admin or have the metastore + privilege **ACCESS** on the service credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := generateTemporaryServiceCredentialJson.Unmarshal(&generateTemporaryServiceCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.GenerateTemporaryServiceCredential(ctx, generateTemporaryServiceCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range generateTemporaryServiceCredentialOverrides { + fn(cmd, &generateTemporaryServiceCredentialReq) + } + + return cmd +} + +// start get-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getCredentialOverrides []func( + *cobra.Command, + *catalog.GetCredentialRequest, +) + +func newGetCredential() *cobra.Command { + cmd := &cobra.Command{} + + var getCredentialReq catalog.GetCredentialRequest + + // TODO: short flags + + cmd.Use = "get-credential NAME_ARG" + cmd.Short = `Get a credential.` + cmd.Long = `Get a credential. + + Gets a credential from the metastore. The caller must be a metastore admin, + the owner of the credential, or have any permission on the credential. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getCredentialReq.NameArg = args[0] + + response, err := w.Credentials.GetCredential(ctx, getCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getCredentialOverrides { + fn(cmd, &getCredentialReq) + } + + return cmd +} + +// start list-credentials command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
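// For illustration, a hypothetical curated override for list-credentials
// (assumed code, not generated) could cap the default page size:
//
//	func init() {
//		listCredentialsOverrides = append(listCredentialsOverrides,
//			func(cmd *cobra.Command, req *catalog.ListCredentialsRequest) {
//				// Assumption: a smaller default page for interactive use.
//				req.MaxResults = 50
//			})
//	}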
+var listCredentialsOverrides []func( + *cobra.Command, + *catalog.ListCredentialsRequest, +) + +func newListCredentials() *cobra.Command { + cmd := &cobra.Command{} + + var listCredentialsReq catalog.ListCredentialsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`) + cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`) + cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE]`) + + cmd.Use = "list-credentials" + cmd.Short = `List credentials.` + cmd.Long = `List credentials. + + Gets an array of credentials (as __CredentialInfo__ objects). + + The array is limited to only the credentials that the caller has permission to + access. If the caller is a metastore admin, retrieval of credentials is + unrestricted. There is no guarantee of a specific ordering of the elements in + the array.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.Credentials.ListCredentials(ctx, listCredentialsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listCredentialsOverrides { + fn(cmd, &listCredentialsReq) + } + + return cmd +} + +// start update-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateCredentialOverrides []func( + *cobra.Command, + *catalog.UpdateCredentialRequest, +) + +func newUpdateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var updateCredentialReq catalog.UpdateCredentialRequest + var updateCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`) + cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force update even if there are dependent services.`) + cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`) + cmd.Flags().StringVar(&updateCredentialReq.Owner, "owner", updateCredentialReq.Owner, `Username of current owner of credential.`) + cmd.Flags().BoolVar(&updateCredentialReq.SkipValidation, "skip-validation", updateCredentialReq.SkipValidation, `Supply true to this argument to skip validation of the updated credential.`) + + cmd.Use = "update-credential NAME_ARG" + cmd.Short = `Update a credential.` + cmd.Long = `Update a credential. + + Updates a credential on the metastore. + + The caller must be the owner of the credential or a metastore admin or have + the MANAGE permission. If the caller is a metastore admin, only the + __owner__ field can be changed. + + Arguments: + NAME_ARG: Name of the credential.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := updateCredentialJson.Unmarshal(&updateCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + updateCredentialReq.NameArg = args[0] + + response, err := w.Credentials.UpdateCredential(ctx, updateCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateCredentialOverrides { + fn(cmd, &updateCredentialReq) + } + + return cmd +} + +// start validate-credential command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var validateCredentialOverrides []func( + *cobra.Command, + *catalog.ValidateCredentialRequest, +) + +func newValidateCredential() *cobra.Command { + cmd := &cobra.Command{} + + var validateCredentialReq catalog.ValidateCredentialRequest + var validateCredentialJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&validateCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: aws_iam_role + // TODO: complex arg: azure_managed_identity + cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`) + cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE]`) + + cmd.Use = "validate-credential" + cmd.Short = `Validate a credential.` + cmd.Long = `Validate a credential. + + Validates a credential. + + Either the __credential_name__ or the cloud-specific credential must be + provided. 
+ + The caller must be a metastore admin or the credential owner.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + diags := validateCredentialJson.Unmarshal(&validateCredentialReq) + if diags.HasError() { + return diags.Error() + } + if len(diags) > 0 { + err := cmdio.RenderDiagnosticsToErrorOut(ctx, diags) + if err != nil { + return err + } + } + } + + response, err := w.Credentials.ValidateCredential(ctx, validateCredentialReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range validateCredentialOverrides { + fn(cmd, &validateCredentialReq) + } + + return cmd +} + +// end service Credentials diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 4c6b57d18..b5173aebf 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -2034,8 +2034,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set experiment permissions.` cmd.Long = `Set experiment permissions. - Sets permissions on an experiment. Experiments can inherit permissions from - their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: EXPERIMENT_ID: The experiment for which to get or manage permissions.` diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 97d34df09..82fd8d7e1 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -356,7 +356,7 @@ func newUpdate() *cobra.Command { // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`) cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) - cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go index 287bcde63..25fa9396d 100755 --- a/cmd/workspace/genie/genie.go +++ b/cmd/workspace/genie/genie.go @@ -160,13 +160,13 @@ func newCreateMessage() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var executeMessageQueryOverrides []func( *cobra.Command, - *dashboards.ExecuteMessageQueryRequest, + *dashboards.GenieExecuteMessageQueryRequest, ) func newExecuteMessageQuery() *cobra.Command { cmd := &cobra.Command{} - var executeMessageQueryReq dashboards.ExecuteMessageQueryRequest + var executeMessageQueryReq dashboards.GenieExecuteMessageQueryRequest // TODO: short flags diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 8a84df946..40c76a5dd 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -635,8 +635,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set instance pool permissions.` cmd.Long = `Set instance pool permissions. - Sets permissions on an instance pool. Instance pools can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: INSTANCE_POOL_ID: The instance pool for which to get or manage permissions.` diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 9e8db43d0..b067937e2 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -847,7 +847,7 @@ func newGetRun() *cobra.Command { cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`) cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`) - cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page or the previous page of job tasks, set this field to the value of the next_page_token or prev_page_token returned in the GetJob response.`) + cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page of job tasks, set this field to the value of the next_page_token returned in the GetJob response.`) cmd.Use = "get-run RUN_ID" cmd.Short = `Get a single job run.` @@ -1339,6 +1339,7 @@ func newRunNow() *cobra.Command { // TODO: array: jar_params // TODO: map via StringToStringVar: job_parameters // TODO: map via StringToStringVar: notebook_params + // TODO: array: only // TODO: complex arg: pipeline_params // TODO: map via StringToStringVar: python_named_params // TODO: array: python_params @@ -1470,8 +1471,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set job permissions.` cmd.Long = `Set job permissions. - Sets permissions on a job. Jobs can inherit permissions from their root - object. 
+ Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: JOB_ID: The job for which to get or manage permissions.` diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 33a45c65f..239c72b6e 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -70,35 +70,26 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq dashboards.CreateDashboardRequest + createReq.Dashboard = &dashboards.Dashboard{} var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`) - cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) - cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + cmd.Flags().StringVar(&createReq.Dashboard.DisplayName, "display-name", createReq.Dashboard.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&createReq.Dashboard.SerializedDashboard, "serialized-dashboard", createReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&createReq.Dashboard.WarehouseId, "warehouse-id", createReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`) - cmd.Use = "create DISPLAY_NAME" + cmd.Use = "create" cmd.Short = `Create dashboard.` cmd.Long = `Create dashboard. - Create a draft dashboard. - - Arguments: - DISPLAY_NAME: The display name of the dashboard.` + Create a draft dashboard.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'display_name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -108,7 +99,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.Dashboard) if diags.HasError() { return diags.Error() } @@ -119,9 +110,6 @@ func newCreate() *cobra.Command { } } } - if !cmd.Flags().Changed("json") { - createReq.DisplayName = args[0] - } response, err := w.Lakeview.Create(ctx, createReq) if err != nil { @@ -155,13 +143,15 @@ func newCreateSchedule() *cobra.Command { cmd := &cobra.Command{} var createScheduleReq dashboards.CreateScheduleRequest + createScheduleReq.Schedule = &dashboards.Schedule{} var createScheduleJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`) - cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. 
Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&createScheduleReq.Schedule.DisplayName, "display-name", createScheduleReq.Schedule.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&createScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&createScheduleReq.Schedule.WarehouseId, "warehouse-id", createScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`) cmd.Use = "create-schedule DASHBOARD_ID" cmd.Short = `Create dashboard schedule.` @@ -176,6 +166,13 @@ func newCreateSchedule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input") + } + return nil + } check := root.ExactArgs(1) return check(cmd, args) } @@ -186,7 +183,7 @@ func newCreateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createScheduleJson.Unmarshal(&createScheduleReq) + diags := createScheduleJson.Unmarshal(&createScheduleReq.Schedule) if diags.HasError() { return diags.Error() } @@ -196,8 +193,6 @@ func newCreateSchedule() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } createScheduleReq.DashboardId = args[0] @@ -233,6 +228,7 @@ func newCreateSubscription() *cobra.Command { cmd := &cobra.Command{} var createSubscriptionReq dashboards.CreateSubscriptionRequest + createSubscriptionReq.Subscription = &dashboards.Subscription{} var createSubscriptionJson flags.JsonFlag // TODO: short flags @@ -252,6 +248,13 @@ func newCreateSubscription() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'subscriber' in your JSON input") + } + return nil + } check := root.ExactArgs(2) return check(cmd, args) } @@ -262,7 +265,7 @@ func newCreateSubscription() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq) + diags := createSubscriptionJson.Unmarshal(&createSubscriptionReq.Subscription) if diags.HasError() { return diags.Error() } @@ -272,8 +275,6 @@ func newCreateSubscription() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } createSubscriptionReq.DashboardId = args[0] createSubscriptionReq.ScheduleId = args[1] @@ -313,8 +314,6 @@ func newDeleteSchedule() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`) - cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID" cmd.Short = `Delete dashboard schedule.` cmd.Long = `Delete dashboard schedule. 
@@ -376,8 +375,6 @@ func newDeleteSubscription() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`) - cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" cmd.Short = `Delete schedule subscription.` cmd.Long = `Delete schedule subscription. @@ -682,7 +679,6 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`) - cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`) cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`) cmd.Flags().Var(&listReq.View, "view", `DASHBOARD_VIEW_BASIConly includes summary metadata from the dashboard. Supported values: [DASHBOARD_VIEW_BASIC]`) @@ -735,7 +731,6 @@ func newListSchedules() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`) - cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`) cmd.Use = "list-schedules DASHBOARD_ID" cmd.Short = `List dashboard schedules.` @@ -794,7 +789,6 @@ func newListSubscriptions() *cobra.Command { // TODO: short flags cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`) - cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`) cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID" cmd.Short = `List schedule subscriptions.` @@ -1126,15 +1120,15 @@ func newUpdate() *cobra.Command { cmd := &cobra.Command{} var updateReq dashboards.UpdateDashboardRequest + updateReq.Dashboard = &dashboards.Dashboard{} var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`) - cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`) - cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) - cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + cmd.Flags().StringVar(&updateReq.Dashboard.DisplayName, "display-name", updateReq.Dashboard.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&updateReq.Dashboard.SerializedDashboard, "serialized-dashboard", updateReq.Dashboard.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&updateReq.Dashboard.WarehouseId, "warehouse-id", updateReq.Dashboard.WarehouseId, `The warehouse ID used to run the dashboard.`) cmd.Use = "update DASHBOARD_ID" cmd.Short = `Update dashboard.` @@ -1158,7 +1152,7 @@ func newUpdate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateJson.Unmarshal(&updateReq) + diags := 
updateJson.Unmarshal(&updateReq.Dashboard) if diags.HasError() { return diags.Error() } @@ -1203,14 +1197,15 @@ func newUpdateSchedule() *cobra.Command { cmd := &cobra.Command{} var updateScheduleReq dashboards.UpdateScheduleRequest + updateScheduleReq.Schedule = &dashboards.Schedule{} var updateScheduleJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`) - cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`) - cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&updateScheduleReq.Schedule.DisplayName, "display-name", updateScheduleReq.Schedule.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&updateScheduleReq.Schedule.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + cmd.Flags().StringVar(&updateScheduleReq.Schedule.WarehouseId, "warehouse-id", updateScheduleReq.Schedule.WarehouseId, `The warehouse id to run the dashboard with for the schedule.`) cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID" cmd.Short = `Update dashboard schedule.` @@ -1226,6 +1221,13 @@ func newUpdateSchedule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cron_schedule' in your JSON input") + } + return nil + } check := root.ExactArgs(2) return check(cmd, args) } @@ -1236,7 +1238,7 @@ func newUpdateSchedule() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := updateScheduleJson.Unmarshal(&updateScheduleReq) + diags := updateScheduleJson.Unmarshal(&updateScheduleReq.Schedule) if diags.HasError() { return diags.Error() } @@ -1246,8 +1248,6 @@ func newUpdateSchedule() *cobra.Command { return err } } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } updateScheduleReq.DashboardId = args[0] updateScheduleReq.ScheduleId = args[1] diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index b45d83e3d..194464691 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -2123,7 +2123,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set registered model permissions.` cmd.Long = `Set registered model permissions. - Sets permissions on a registered model. Registered models can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. 
Arguments: diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go index 1c25d1e26..f050017ec 100755 --- a/cmd/workspace/online-tables/online-tables.go +++ b/cmd/workspace/online-tables/online-tables.go @@ -3,6 +3,9 @@ package online_tables import ( + "fmt" + "time" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -52,13 +55,20 @@ func newCreate() *cobra.Command { cmd := &cobra.Command{} var createReq catalog.CreateOnlineTableRequest + createReq.Table = &catalog.OnlineTable{} var createJson flags.JsonFlag + var createSkipWait bool + var createTimeout time.Duration + + cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach ACTIVE state`) + cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach ACTIVE state`) // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`) + cmd.Flags().StringVar(&createReq.Table.Name, "name", createReq.Table.Name, `Full three-part (catalog, schema, table) name of the table.`) // TODO: complex arg: spec + // TODO: complex arg: status cmd.Use = "create" cmd.Short = `Create an Online Table.` @@ -79,7 +89,7 @@ func newCreate() *cobra.Command { w := root.WorkspaceClient(ctx) if cmd.Flags().Changed("json") { - diags := createJson.Unmarshal(&createReq) + diags := createJson.Unmarshal(&createReq.Table) if diags.HasError() { return diags.Error() } @@ -91,11 +101,24 @@ func newCreate() *cobra.Command { } } - response, err := w.OnlineTables.Create(ctx, createReq) + wait, err := w.OnlineTables.Create(ctx, createReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *catalog.OnlineTable) { + status := i.UnityCatalogProvisioningState + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index d007a425f..ca570351e 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -241,8 +241,9 @@ func newSet() *cobra.Command { cmd.Short = `Set object permissions.` cmd.Long = `Set object permissions. - Sets permissions on an object. Objects can inherit permissions from their - parent objects or root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their parent objects or root object. Arguments: REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index 5bd94e0b6..38636e83b 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -691,8 +691,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set pipeline permissions.` cmd.Long = `Set pipeline permissions. - Sets permissions on a pipeline. 
Pipelines can inherit permissions from their - root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: PIPELINE_ID: The pipeline for which to get or manage permissions.` @@ -972,6 +973,7 @@ func newUpdate() *cobra.Command { // TODO: array: notifications cmd.Flags().BoolVar(&updateReq.Photon, "photon", updateReq.Photon, `Whether Photon is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.PipelineId, "pipeline-id", updateReq.PipelineId, `Unique identifier for this pipeline.`) + // TODO: complex arg: restart_window cmd.Flags().StringVar(&updateReq.Schema, "schema", updateReq.Schema, `The default schema (database) where tables are read from or published to.`) cmd.Flags().BoolVar(&updateReq.Serverless, "serverless", updateReq.Serverless, `Whether serverless compute is enabled for this pipeline.`) cmd.Flags().StringVar(&updateReq.Storage, "storage", updateReq.Storage, `DBFS root directory for storing checkpoints and tables.`) diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index b77347b06..7dcb13538 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -513,8 +513,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set repo permissions.` cmd.Long = `Set repo permissions. - Sets permissions on a repo. Repos can inherit permissions from their root - object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. Arguments: REPO_ID: The repo for which to get or manage permissions.` diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 363e9ea16..cc99177c7 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -1008,7 +1008,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set serving endpoint permissions.` cmd.Long = `Set serving endpoint permissions. - Sets permissions on a serving endpoint. Serving endpoints can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their root object. 
Arguments: diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 31e6ceee4..cca77b2d6 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -5,6 +5,8 @@ package settings import ( "github.com/spf13/cobra" + aibi_dashboard_embedding_access_policy "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-access-policy" + aibi_dashboard_embedding_approved_domains "github.com/databricks/cli/cmd/workspace/aibi-dashboard-embedding-approved-domains" automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" @@ -30,6 +32,8 @@ func New() *cobra.Command { } // Add subservices + cmd.AddCommand(aibi_dashboard_embedding_access_policy.New()) + cmd.AddCommand(aibi_dashboard_embedding_approved_domains.New()) cmd.AddCommand(automatic_cluster_update.New()) cmd.AddCommand(compliance_security_profile.New()) cmd.AddCommand(default_namespace.New()) diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 62c3407f4..f70963f29 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -391,7 +391,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`) - cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`) cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`) // TODO: array: updates diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 2caf09041..4dc028065 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -360,7 +360,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) - cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index 35775f17f..1ef247b6d 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -304,6 +304,7 @@ func newList() *cobra.Command { cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`) cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`) cmd.Flags().BoolVar(&listReq.OmitProperties, "omit-properties", listReq.OmitProperties, `Whether to omit the properties of the table from the response or not.`) + cmd.Flags().BoolVar(&listReq.OmitUsername, "omit-username", listReq.OmitUsername, `Whether to omit the username of the table (e.g.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`) cmd.Use = "list CATALOG_NAME SCHEMA_NAME" diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 6deb8d125..c8d57fd6d 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -448,8 +448,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set token permissions.` cmd.Long = `Set token permissions. - Sets permissions on all tokens. Tokens can inherit permissions from their root - object.` + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index b085ab413..e787446af 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -542,8 +542,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set password permissions.` cmd.Long = `Set password permissions. - Sets permissions on all passwords. Passwords can inherit permissions from - their root object.` + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index 43d6c8ab9..03925bd70 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -686,8 +686,9 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set SQL warehouse permissions.` cmd.Long = `Set SQL warehouse permissions. - Sets permissions on a SQL warehouse. SQL warehouses can inherit permissions - from their root object. + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit + permissions from their root object. 
Arguments: WAREHOUSE_ID: The SQL warehouse for which to get or manage permissions.` diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 21da478c4..61e1437a1 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -447,6 +447,7 @@ func newImport() *cobra.Command { DBC, HTML, JUPYTER, + RAW, R_MARKDOWN, SOURCE, ]`) @@ -708,7 +709,8 @@ func newSetPermissions() *cobra.Command { cmd.Short = `Set workspace object permissions.` cmd.Long = `Set workspace object permissions. - Sets permissions on a workspace object. Workspace objects can inherit + Sets permissions on an object, replacing existing permissions if they exist. + Deletes all direct permissions if none are specified. Objects can inherit permissions from their parent objects or root object. Arguments: diff --git a/go.mod b/go.mod index e33214ebb..9ae5fde0d 100644 --- a/go.mod +++ b/go.mod @@ -7,7 +7,7 @@ toolchain go1.23.2 require ( github.com/Masterminds/semver/v3 v3.3.0 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.49.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index 419fa5681..2bfcfb2fa 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.49.0 h1:VBTeZZMLIuBSM4kxOCfUcW9z4FUQZY2QeNRD5qm9FUQ= -github.com/databricks/databricks-sdk-go v0.49.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M= +github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= diff --git a/internal/bundle/dashboards_test.go b/internal/bundle/dashboards_test.go index b12cc040c..3c2e27c62 100644 --- a/internal/bundle/dashboards_test.go +++ b/internal/bundle/dashboards_test.go @@ -46,8 +46,10 @@ func TestAccDashboards(t *testing.T) { // Make an out of band modification to the dashboard and confirm that it is detected. 
_, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{ - DashboardId: oi.ResourceId, - SerializedDashboard: dashboard.SerializedDashboard, + DashboardId: oi.ResourceId, + Dashboard: &dashboards.Dashboard{ + SerializedDashboard: dashboard.SerializedDashboard, + }, }) require.NoError(t, err) diff --git a/internal/dashboard_assumptions_test.go b/internal/dashboard_assumptions_test.go index 912e046b5..64294873d 100644 --- a/internal/dashboard_assumptions_test.go +++ b/internal/dashboard_assumptions_test.go @@ -30,10 +30,12 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { dir := wt.TemporaryWorkspaceDir("dashboard-assumptions-") dashboard, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ - DisplayName: dashboardName, - ParentPath: dir, - SerializedDashboard: string(dashboardPayload), - WarehouseId: warehouseId, + Dashboard: &dashboards.Dashboard{ + DisplayName: dashboardName, + ParentPath: dir, + SerializedDashboard: string(dashboardPayload), + WarehouseId: warehouseId, + }, }) require.NoError(t, err) t.Logf("Dashboard ID (per Lakeview API): %s", dashboard.DashboardId) @@ -62,9 +64,11 @@ func TestAccDashboardAssumptions_WorkspaceImport(t *testing.T) { // Try to overwrite the dashboard via the Lakeview API (and expect failure). { _, err := wt.W.Lakeview.Create(ctx, dashboards.CreateDashboardRequest{ - DisplayName: dashboardName, - ParentPath: dir, - SerializedDashboard: string(dashboardPayload), + Dashboard: &dashboards.Dashboard{ + DisplayName: dashboardName, + ParentPath: dir, + SerializedDashboard: string(dashboardPayload), + }, }) require.ErrorIs(t, err, apierr.ErrResourceAlreadyExists) } From e1978fa4293e5f034a2958ddaf7748fdb76ce9a8 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 14 Nov 2024 03:09:51 +0530 Subject: [PATCH 12/42] Add support for non-Python ipynb notebooks to DABs (#1827) ## Changes ### Background The workspace import APIs recently added support for importing Jupyter notebooks written in R, Scala, or SQL, that is non-Python notebooks. This now works for the `/import-file` API which we leverage in the CLI. Note: We do not need any changes in `databricks sync`. It works out of the box because any state mapping of local names to remote names that we store is only scoped to the notebook extension (i.e., `.ipynb` in this case) and is agnostic of the notebook's specific language. ### Problem this PR addresses The extension-aware filer previously did not function because it checks that a `.ipynb` notebook is written in Python. This PR relaxes that constraint and adds integration tests for both the normal workspace filer and extensions aware filer writing and reading non-Python `.ipynb` notebooks. This implies that after this PR DABs in the workspace / CLI from DBR will work for non-Python notebooks as well. non-Python notebooks for DABs deployment from local machines already works after the platform side changes to the API landed, this PR just adds integration tests for that bit of functionality. Note: Any platform side changes we needed for the import API have already been rolled out to production. ### Before DABs deploy would work fine for non-Python notebooks. But DABs deployments from DBR would not. ### After DABs deploys both from local machines and DBR will work fine. ## Testing For creating the `.ipynb` notebook fixtures used in the integration tests I created them directly from the VSCode UI. 
This ensures high fidelity with how users will create their non-Python notebooks locally. For Python notebooks this is supported out of the box by VSCode but for R and Scala notebooks this requires installing the Jupyter kernel for R and Scala on my local machine and using that from VSCode. For SQL, I ended up directly modifying the `language_info` field in the Jupyter metadata to create the test fixture. ### Discussion: Issues with configuring language at the cell level The language metadata for a Jupyter notebook is standardized at the notebook level (in the `language_info` field). Unfortunately, it's not standardized at the cell level. Thus, for example, if a user changes the language for their cell in VSCode (which is supported by the standard Jupyter VSCode integration), it'll cause a runtime error when the user actually attempts to run the notebook. This is because the cell-level metadata is encoded in a format specific to VSCode: ``` cells: []{ "vscode": { "languageId": "sql" } } ``` Supporting cell level languages is thus out of scope for this PR and can be revisited along with the workspace files team if there's strong customer interest. --- internal/filer_test.go | 504 ++++++++++-------- internal/helpers.go | 22 +- internal/testdata/notebooks/py1.ipynb | 27 + internal/testdata/notebooks/py2.ipynb | 27 + internal/testdata/notebooks/r1.ipynb | 25 + internal/testdata/notebooks/r2.ipynb | 29 + internal/testdata/notebooks/scala1.ipynb | 38 ++ internal/testdata/notebooks/scala2.ipynb | 38 ++ internal/testdata/notebooks/sql1.ipynb | 20 + internal/testdata/notebooks/sql2.ipynb | 20 + .../workspace_files_extensions_client.go | 33 +- .../workspace_files_extensions_client_test.go | 85 ++- libs/notebook/detect.go | 10 +- libs/notebook/ext.go | 30 +- 14 files changed, 637 insertions(+), 271 deletions(-) create mode 100644 internal/testdata/notebooks/py1.ipynb create mode 100644 internal/testdata/notebooks/py2.ipynb create mode 100644 internal/testdata/notebooks/r1.ipynb create mode 100644 internal/testdata/notebooks/r2.ipynb create mode 100644 internal/testdata/notebooks/scala1.ipynb create mode 100644 internal/testdata/notebooks/scala2.ipynb create mode 100644 internal/testdata/notebooks/sql1.ipynb create mode 100644 internal/testdata/notebooks/sql2.ipynb diff --git a/internal/filer_test.go b/internal/filer_test.go index bc4c94808..20207d343 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -39,7 +39,7 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func (f filerTest) assertContentsJupyter(ctx context.Context, name string) { +func (f filerTest) assertContentsJupyter(ctx context.Context, name string, language string) { reader, err := f.Read(ctx, name) if !assert.NoError(f, err) { return @@ -62,6 +62,7 @@ func (f filerTest) assertContentsJupyter(ctx context.Context, name string) { // Since a roundtrip to the workspace changes a Jupyter notebook's payload, // the best we can do is assert that the nbformat is correct. 
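	// For illustration (a sketch, not part of the fixture set): the notebook-level
	// language these assertions read lives under metadata.language_info.name in the
	// Jupyter payload, e.g.
	//
	//	{"metadata": {"language_info": {"name": "scala"}}, "nbformat": 4}
	//
	// so after a roundtrip we can still assert on both the nbformat and the language.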
assert.EqualValues(f, 4, actual["nbformat"]) + assert.Equal(f, language, actual["metadata"].(map[string]any)["language_info"].(map[string]any)["name"]) } func (f filerTest) assertNotExists(ctx context.Context, name string) { @@ -360,146 +361,114 @@ func TestAccFilerReadDir(t *testing.T) { } } -var jupyterNotebookContent1 = ` -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Jupyter Notebook Version 1\")" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 - } -` - -var jupyterNotebookContent2 = ` -{ - "cells": [ - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "print(\"Jupyter Notebook Version 2\")" - ] - } - ], - "metadata": { - "language_info": { - "name": "python" - }, - "orig_nbformat": 4 - }, - "nbformat": 4, - "nbformat_minor": 2 - } -` - -func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { +func TestAccFilerWorkspaceNotebook(t *testing.T) { t.Parallel() - f, _ := setupWsfsFiler(t) ctx := context.Background() var err error - // Upload the notebooks - err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))")) - require.NoError(t, err) - err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))")) - require.NoError(t, err) - err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\"")) - require.NoError(t, err) - err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))")) - require.NoError(t, err) - err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1)) - require.NoError(t, err) + tcases := []struct { + name string + nameWithoutExt string + content1 string + expected1 string + content2 string + expected2 string + }{ + { + name: "pyNb.py", + nameWithoutExt: "pyNb", + content1: "# Databricks notebook source\nprint('first upload')", + expected1: "# Databricks notebook source\nprint('first upload')", + content2: "# Databricks notebook source\nprint('second upload')", + expected2: "# Databricks notebook source\nprint('second upload')", + }, + { + name: "rNb.r", + nameWithoutExt: "rNb", + content1: "# Databricks notebook source\nprint('first upload')", + expected1: "# Databricks notebook source\nprint('first upload')", + content2: "# Databricks notebook source\nprint('second upload')", + expected2: "# Databricks notebook source\nprint('second upload')", + }, + { + name: "sqlNb.sql", + nameWithoutExt: "sqlNb", + content1: "-- Databricks notebook source\n SELECT \"first upload\"", + expected1: "-- Databricks notebook source\n SELECT \"first upload\"", + content2: "-- Databricks notebook source\n SELECT \"second upload\"", + expected2: "-- Databricks notebook source\n SELECT \"second upload\"", + }, + { + name: "scalaNb.scala", + nameWithoutExt: "scalaNb", + content1: "// Databricks notebook source\n println(\"first upload\")", + expected1: "// Databricks notebook source\n println(\"first upload\")", + content2: "// Databricks notebook source\n println(\"second upload\")", + expected2: "// Databricks notebook source\n println(\"second upload\")", + }, + { + name: "pythonJupyterNb.ipynb", + nameWithoutExt: "pythonJupyterNb", + content1: readFile(t, "testdata/notebooks/py1.ipynb"), + expected1: "# Databricks notebook source\nprint(1)", + content2: 
readFile(t, "testdata/notebooks/py2.ipynb"), + expected2: "# Databricks notebook source\nprint(2)", + }, + { + name: "rJupyterNb.ipynb", + nameWithoutExt: "rJupyterNb", + content1: readFile(t, "testdata/notebooks/r1.ipynb"), + expected1: "# Databricks notebook source\nprint(1)", + content2: readFile(t, "testdata/notebooks/r2.ipynb"), + expected2: "# Databricks notebook source\nprint(2)", + }, + { + name: "scalaJupyterNb.ipynb", + nameWithoutExt: "scalaJupyterNb", + content1: readFile(t, "testdata/notebooks/scala1.ipynb"), + expected1: "// Databricks notebook source\nprintln(1)", + content2: readFile(t, "testdata/notebooks/scala2.ipynb"), + expected2: "// Databricks notebook source\nprintln(2)", + }, + { + name: "sqlJupyterNotebook.ipynb", + nameWithoutExt: "sqlJupyterNotebook", + content1: readFile(t, "testdata/notebooks/sql1.ipynb"), + expected1: "-- Databricks notebook source\nselect 1", + content2: readFile(t, "testdata/notebooks/sql2.ipynb"), + expected2: "-- Databricks notebook source\nselect 2", + }, + } - // Assert contents after initial upload - filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))") - filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))") - filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"") - filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))") - filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")") + for _, tc := range tcases { + f, _ := setupWsfsFiler(t) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - // Assert uploading a second time fails due to overwrite mode missing - err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))")) - assert.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/pyNb$`), err.Error()) + // Upload the notebook + err = f.Write(ctx, tc.name, strings.NewReader(tc.content1)) + require.NoError(t, err) - err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))")) - assert.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/rNb$`), err.Error()) + // Assert contents after initial upload. Note that we expect the content + // for jupyter notebooks to be of type source because the workspace files + // client always uses the source format to read notebooks from the workspace. + filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected1) - err = f.Write(ctx, "sqlNb.sql", strings.NewReader("# Databricks notebook source\n SELECT \"second upload\")")) - assert.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/sqlNb$`), err.Error()) + // Assert uploading a second time fails due to overwrite mode missing + err = f.Write(ctx, tc.name, strings.NewReader(tc.content2)) + assert.ErrorIs(t, err, fs.ErrExist) + assert.Regexp(t, regexp.MustCompile(`file already exists: .*/`+tc.nameWithoutExt+`$`), err.Error()) - err = f.Write(ctx, "scalaNb.scala", strings.NewReader("# Databricks notebook source\n println(\"second upload\"))")) - assert.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/scalaNb$`), err.Error()) + // Try uploading the notebook again with overwrite flag. This time it should succeed. 
+ err = f.Write(ctx, tc.name, strings.NewReader(tc.content2), filer.OverwriteIfExists) + require.NoError(t, err) - err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2)) - assert.ErrorIs(t, err, fs.ErrExist) - assert.Regexp(t, regexp.MustCompile(`file already exists: .*/jupyterNb$`), err.Error()) -} + // Assert contents after second upload + filerTest{t, f}.assertContents(ctx, tc.nameWithoutExt, tc.expected2) + }) + } -func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { - t.Parallel() - - f, _ := setupWsfsFiler(t) - ctx := context.Background() - var err error - - // Upload notebooks - err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('first upload'))")) - require.NoError(t, err) - err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('first upload'))")) - require.NoError(t, err) - err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"first upload\"")) - require.NoError(t, err) - err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"first upload\"))")) - require.NoError(t, err) - err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent1)) - require.NoError(t, err) - - // Assert contents after initial upload - filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('first upload'))") - filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('first upload'))") - filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"first upload\"") - filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"first upload\"))") - filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 1\")") - - // Upload notebooks a second time, overwriting the initial uplaods - err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists) - require.NoError(t, err) - err = f.Write(ctx, "rNb.r", strings.NewReader("# Databricks notebook source\nprint('second upload'))"), filer.OverwriteIfExists) - require.NoError(t, err) - err = f.Write(ctx, "sqlNb.sql", strings.NewReader("-- Databricks notebook source\n SELECT \"second upload\""), filer.OverwriteIfExists) - require.NoError(t, err) - err = f.Write(ctx, "scalaNb.scala", strings.NewReader("// Databricks notebook source\n println(\"second upload\"))"), filer.OverwriteIfExists) - require.NoError(t, err) - err = f.Write(ctx, "jupyterNb.ipynb", strings.NewReader(jupyterNotebookContent2), filer.OverwriteIfExists) - require.NoError(t, err) - - // Assert contents have been overwritten - filerTest{t, f}.assertContents(ctx, "pyNb", "# Databricks notebook source\nprint('second upload'))") - filerTest{t, f}.assertContents(ctx, "rNb", "# Databricks notebook source\nprint('second upload'))") - filerTest{t, f}.assertContents(ctx, "sqlNb", "-- Databricks notebook source\n SELECT \"second upload\"") - filerTest{t, f}.assertContents(ctx, "scalaNb", "// Databricks notebook source\n println(\"second upload\"))") - filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")") } func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { @@ -515,11 +484,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { {"foo.r", "print('foo')"}, {"foo.scala", "println('foo')"}, {"foo.sql", 
"SELECT 'foo'"}, - {"jupyterNb.ipynb", jupyterNotebookContent1}, - {"jupyterNb2.ipynb", jupyterNotebookContent2}, + {"py1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, + {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, + {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, + {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, } @@ -554,11 +525,13 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { "foo.r", "foo.scala", "foo.sql", - "jupyterNb.ipynb", - "jupyterNb2.ipynb", + "py1.ipynb", "pyNb.py", + "r1.ipynb", "rNb.r", + "scala1.ipynb", "scalaNb.scala", + "sql1.ipynb", "sqlNb.sql", }, names) @@ -582,7 +555,10 @@ func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { }{ {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, {"bar.py", "print('foo')"}, - {"jupyter.ipynb", jupyterNotebookContent1}, + {"p1.ipynb", readFile(t, "testdata/notebooks/py1.ipynb")}, + {"r1.ipynb", readFile(t, "testdata/notebooks/r1.ipynb")}, + {"scala1.ipynb", readFile(t, "testdata/notebooks/scala1.ipynb")}, + {"sql1.ipynb", readFile(t, "testdata/notebooks/sql1.ipynb")}, {"pretender", "not a notebook"}, {"dir/file.txt", "file content"}, {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, @@ -608,11 +584,15 @@ func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { // Read contents of test fixtures as a sanity check. filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('first upload'))") filerTest{t, wf}.assertContents(ctx, "bar.py", "print('foo')") - filerTest{t, wf}.assertContentsJupyter(ctx, "jupyter.ipynb") filerTest{t, wf}.assertContents(ctx, "dir/file.txt", "file content") filerTest{t, wf}.assertContents(ctx, "scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')") filerTest{t, wf}.assertContents(ctx, "pretender", "not a notebook") + filerTest{t, wf}.assertContentsJupyter(ctx, "p1.ipynb", "python") + filerTest{t, wf}.assertContentsJupyter(ctx, "r1.ipynb", "r") + filerTest{t, wf}.assertContentsJupyter(ctx, "scala1.ipynb", "scala") + filerTest{t, wf}.assertContentsJupyter(ctx, "sql1.ipynb", "sql") + // Read non-existent file _, err := wf.Read(ctx, "non-existent.py") assert.ErrorIs(t, err, fs.ErrNotExist) @@ -638,35 +618,41 @@ func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { ctx := context.Background() wf := setupFilerWithExtensionsTest(t) - // Delete notebook - err := wf.Delete(ctx, "foo.py") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "foo.py") + for _, fileName := range []string{ + // notebook + "foo.py", + // file + "bar.py", + // python jupyter notebook + "p1.ipynb", + // R jupyter notebook + "r1.ipynb", + // Scala jupyter notebook + "scala1.ipynb", + // SQL jupyter notebook + "sql1.ipynb", + } { + err := wf.Delete(ctx, fileName) + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, fileName) + } - // Delete file - err = wf.Delete(ctx, "bar.py") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "bar.py") - - // Delete jupyter notebook - err = wf.Delete(ctx, "jupyter.ipynb") - require.NoError(t, err) - filerTest{t, wf}.assertNotExists(ctx, "jupyter.ipynb") - - // Delete non-existent file - err 
= wf.Delete(ctx, "non-existent.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not delete a file as a notebook - err = wf.Delete(ctx, "pretender.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not delete a Scala notebook as a Python notebook - _, err = wf.Read(ctx, "scala-notebook.py") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, fileName := range []string{ + // do not delete non-existent file + "non-existent.py", + // do not delete a file assuming it is a notebook and stripping the extension + "pretender.py", + // do not delete a Scala notebook as a Python notebook + "scala-notebook.py", + // do not delete a file assuming it is a Jupyter notebook and stripping the extension + "pretender.ipynb", + } { + err := wf.Delete(ctx, fileName) + assert.ErrorIs(t, err, fs.ErrNotExist) + } // Delete directory - err = wf.Delete(ctx, "dir") + err := wf.Delete(ctx, "dir") assert.ErrorIs(t, err, fs.ErrInvalid) // Delete directory recursively @@ -681,44 +667,45 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { ctx := context.Background() wf := setupFilerWithExtensionsTest(t) - // Stat on a notebook - info, err := wf.Stat(ctx, "foo.py") - require.NoError(t, err) - assert.Equal(t, "foo.py", info.Name()) - assert.False(t, info.IsDir()) - - // Stat on a file - info, err = wf.Stat(ctx, "bar.py") - require.NoError(t, err) - assert.Equal(t, "bar.py", info.Name()) - assert.False(t, info.IsDir()) - - // Stat on a Jupyter notebook - info, err = wf.Stat(ctx, "jupyter.ipynb") - require.NoError(t, err) - assert.Equal(t, "jupyter.ipynb", info.Name()) - assert.False(t, info.IsDir()) + for _, fileName := range []string{ + // notebook + "foo.py", + // file + "bar.py", + // python jupyter notebook + "p1.ipynb", + // R jupyter notebook + "r1.ipynb", + // Scala jupyter notebook + "scala1.ipynb", + // SQL jupyter notebook + "sql1.ipynb", + } { + info, err := wf.Stat(ctx, fileName) + require.NoError(t, err) + assert.Equal(t, fileName, info.Name()) + assert.False(t, info.IsDir()) + } // Stat on a directory - info, err = wf.Stat(ctx, "dir") + info, err := wf.Stat(ctx, "dir") require.NoError(t, err) assert.Equal(t, "dir", info.Name()) assert.True(t, info.IsDir()) - // Stat on a non-existent file - _, err = wf.Stat(ctx, "non-existent.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not stat a file as a notebook - _, err = wf.Stat(ctx, "pretender.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - // Ensure we do not stat a Scala notebook as a Python notebook - _, err = wf.Stat(ctx, "scala-notebook.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - - _, err = wf.Stat(ctx, "pretender.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, fileName := range []string{ + // non-existent file + "non-existent.py", + // do not stat a file assuming it is a notebook and stripping the extension + "pretender.py", + // do not stat a Scala notebook as a Python notebook + "scala-notebook.py", + // do not read a regular file assuming it is a Jupyter notebook and stripping the extension + "pretender.ipynb", + } { + _, err := wf.Stat(ctx, fileName) + assert.ErrorIs(t, err, fs.ErrNotExist) + } } func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { @@ -739,32 +726,115 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() - ctx := context.Background() - wf, _ := setupWsfsExtensionsFiler(t) + // Case 1: Writing source notebooks. 
+ for _, tc := range []struct { + language string + sourceName string + sourceContent string + jupyterName string + jupyterContent string + }{ + { + language: "python", + sourceName: "foo.py", + sourceContent: "# Databricks notebook source\nprint('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "r", + sourceName: "foo.r", + sourceContent: "# Databricks notebook source\nprint('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "scala", + sourceName: "foo.scala", + sourceContent: "// Databricks notebook source\nprintln('foo')", + jupyterName: "foo.ipynb", + }, + { + language: "sql", + sourceName: "foo.sql", + sourceContent: "-- Databricks notebook source\nselect 'foo'", + jupyterName: "foo.ipynb", + }, + } { + t.Run("source_"+tc.language, func(t *testing.T) { + t.Parallel() - // Case 1: Source Notebook - err := wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')")) - require.NoError(t, err) + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) - // The source notebook should exist but not the Jupyter notebook - filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('foo')") - _, err = wf.Stat(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) - _, err = wf.Read(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) - err = wf.Delete(ctx, "foo.ipynb") - assert.ErrorIs(t, err, fs.ErrNotExist) + err := wf.Write(ctx, tc.sourceName, strings.NewReader(tc.sourceContent)) + require.NoError(t, err) - // Case 2: Jupyter Notebook - err = wf.Write(ctx, "bar.ipynb", strings.NewReader(jupyterNotebookContent1)) - require.NoError(t, err) + // Assert on the content of the source notebook that's been written. + filerTest{t, wf}.assertContents(ctx, tc.sourceName, tc.sourceContent) - // The Jupyter notebook should exist but not the source notebook - filerTest{t, wf}.assertContentsJupyter(ctx, "bar.ipynb") - _, err = wf.Stat(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - _, err = wf.Read(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) - err = wf.Delete(ctx, "bar.py") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Ensure that the source notebook is not read when the name contains + // the .ipynb extension. + _, err = wf.Stat(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, tc.jupyterName) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } + + // Case 2: Writing Jupyter notebooks. 
+ for _, tc := range []struct { + language string + sourceName string + jupyterName string + jupyterContent string + }{ + { + language: "python", + sourceName: "foo.py", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/py1.ipynb"), + }, + { + language: "r", + sourceName: "foo.r", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/r1.ipynb"), + }, + { + language: "scala", + sourceName: "foo.scala", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/scala1.ipynb"), + }, + { + language: "sql", + sourceName: "foo.sql", + jupyterName: "foo.ipynb", + jupyterContent: readFile(t, "testdata/notebooks/sql1.ipynb"), + }, + } { + t.Run("jupyter_"+tc.language, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + err := wf.Write(ctx, tc.jupyterName, strings.NewReader(tc.jupyterContent)) + require.NoError(t, err) + + // Assert that the written notebook is jupyter and has the correct + // language_info metadata set. + filerTest{t, wf}.assertContentsJupyter(ctx, tc.jupyterName, tc.language) + + // Ensure that the Jupyter notebook is not read when the name does not + // contain the .ipynb extension. + _, err = wf.Stat(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, tc.sourceName) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } diff --git a/internal/helpers.go b/internal/helpers.go index 3bf387757..3e4b4e97c 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -352,6 +352,13 @@ func RequireErrorRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer, return stdout, stderr, err } +func readFile(t *testing.T, name string) string { + b, err := os.ReadFile(name) + require.NoError(t, err) + + return string(b) +} + func writeFile(t *testing.T, name string, body string) string { f, err := os.Create(filepath.Join(t.TempDir(), name)) require.NoError(t, err) @@ -562,12 +569,10 @@ func setupLocalFiler(t *testing.T) (filer.Filer, string) { } func setupWsfsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesClient(w, tmpdir) + tmpdir := TemporaryWorkspaceDir(t, wt.W) + f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) require.NoError(t, err) // Check if we can use this API here, skip test if we cannot. 
@@ -581,11 +586,10 @@ func setupWsfsFiler(t *testing.T) (filer.Filer, string) { } func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + _, wt := acc.WorkspaceTest(t) - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesExtensionsClient(w, tmpdir) + tmpdir := TemporaryWorkspaceDir(t, wt.W) + f, err := filer.NewWorkspaceFilesExtensionsClient(wt.W, tmpdir) require.NoError(t, err) return f, tmpdir diff --git a/internal/testdata/notebooks/py1.ipynb b/internal/testdata/notebooks/py1.ipynb new file mode 100644 index 000000000..0a44ce0ee --- /dev/null +++ b/internal/testdata/notebooks/py1.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.13" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/py2.ipynb b/internal/testdata/notebooks/py2.ipynb new file mode 100644 index 000000000..8b2ccde1f --- /dev/null +++ b/internal/testdata/notebooks/py2.ipynb @@ -0,0 +1,27 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "name": "python", + "version": "3.8.13" + }, + "orig_nbformat": 4 + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/r1.ipynb b/internal/testdata/notebooks/r1.ipynb new file mode 100644 index 000000000..6280426a3 --- /dev/null +++ b/internal/testdata/notebooks/r1.ipynb @@ -0,0 +1,25 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "R", + "language": "R", + "name": "ir" + }, + "language_info": { + "name": "R" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/r2.ipynb b/internal/testdata/notebooks/r2.ipynb new file mode 100644 index 000000000..f2ff413d2 --- /dev/null +++ b/internal/testdata/notebooks/r2.ipynb @@ -0,0 +1,29 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "vscode": { + "languageId": "r" + } + }, + "outputs": [], + "source": [ + "print(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "R", + "language": "R", + "name": "ir" + }, + "language_info": { + "name": "R" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/scala1.ipynb b/internal/testdata/notebooks/scala1.ipynb new file mode 100644 index 000000000..25a5a187b --- /dev/null +++ b/internal/testdata/notebooks/scala1.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + } + ], + "source": [ + "println(1)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Scala", + "language": "scala", + "name": "scala" + }, + "language_info": { + "codemirror_mode": "text/x-scala", + "file_extension": ".sc", + "mimetype": "text/x-scala", + "name": 
"scala", + "nbconvert_exporter": "script", + "version": "2.13.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/scala2.ipynb b/internal/testdata/notebooks/scala2.ipynb new file mode 100644 index 000000000..353fc29ff --- /dev/null +++ b/internal/testdata/notebooks/scala2.ipynb @@ -0,0 +1,38 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 1, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "1\n" + ] + } + ], + "source": [ + "println(2)" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Scala", + "language": "scala", + "name": "scala" + }, + "language_info": { + "codemirror_mode": "text/x-scala", + "file_extension": ".sc", + "mimetype": "text/x-scala", + "name": "scala", + "nbconvert_exporter": "script", + "version": "2.13.14" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/sql1.ipynb b/internal/testdata/notebooks/sql1.ipynb new file mode 100644 index 000000000..7a3562a16 --- /dev/null +++ b/internal/testdata/notebooks/sql1.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "select 1" + ] + } + ], + "metadata": { + "language_info": { + "name": "sql" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/internal/testdata/notebooks/sql2.ipynb b/internal/testdata/notebooks/sql2.ipynb new file mode 100644 index 000000000..7780e1daf --- /dev/null +++ b/internal/testdata/notebooks/sql2.ipynb @@ -0,0 +1,20 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "select 2" + ] + } + ], + "metadata": { + "language_info": { + "name": "sql" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index b24ecf7ee..53b77dd5b 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -7,6 +7,7 @@ import ( "io" "io/fs" "path" + "slices" "strings" "github.com/databricks/cli/libs/log" @@ -23,14 +24,6 @@ type workspaceFilesExtensionsClient struct { readonly bool } -var extensionsToLanguages = map[string]workspace.Language{ - ".py": workspace.LanguagePython, - ".r": workspace.LanguageR, - ".scala": workspace.LanguageScala, - ".sql": workspace.LanguageSql, - ".ipynb": workspace.LanguagePython, -} - type workspaceFileStatus struct { wsfsFileInfo @@ -54,7 +47,12 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex nameWithoutExt := strings.TrimSuffix(name, ext) // File name does not have an extension associated with Databricks notebooks, return early. - if _, ok := extensionsToLanguages[ext]; !ok { + if !slices.Contains([]string{ + notebook.ExtensionPython, + notebook.ExtensionR, + notebook.ExtensionScala, + notebook.ExtensionSql, + notebook.ExtensionJupyter}, ext) { return nil, nil } @@ -75,22 +73,23 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex return nil, nil } - // Not the correct language. Return early. - if stat.Language != extensionsToLanguages[ext] { - log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), extensionsToLanguages[ext], stat.Language) + // Not the correct language. Return early. 
Note: All languages are supported + // for Jupyter notebooks. + if ext != notebook.ExtensionJupyter && stat.Language != notebook.ExtensionToLanguage[ext] { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), notebook.ExtensionToLanguage[ext], stat.Language) return nil, nil } - // When the extension is .py we expect the export format to be source. + // For non-jupyter notebooks the export format should be source. // If it's not, return early. - if ext == ".py" && stat.ReposExportFormat != workspace.ExportFormatSource { + if ext != notebook.ExtensionJupyter && stat.ReposExportFormat != workspace.ExportFormatSource { log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a source notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) return nil, nil } // When the extension is .ipynb we expect the export format to be Jupyter. // If it's not, return early. - if ext == ".ipynb" && stat.ReposExportFormat != workspace.ExportFormatJupyter { + if ext == notebook.ExtensionJupyter && stat.ReposExportFormat != workspace.ExportFormatJupyter { log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a Jupyter notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) return nil, nil } @@ -120,8 +119,8 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con ext := notebook.GetExtensionByLanguage(&stat.ObjectInfo) // If the notebook was exported as a Jupyter notebook, the extension should be .ipynb. - if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter { - ext = ".ipynb" + if stat.ReposExportFormat == workspace.ExportFormatJupyter { + ext = notebook.ExtensionJupyter } // Modify the stat object path to include the extension. This stat object will be used diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go index 321c43712..974a6a37b 100644 --- a/libs/filer/workspace_files_extensions_client_test.go +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -37,7 +37,7 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { expectedError string }{ { - name: "python source notebook and file", + name: "python source notebook and file with source extension", language: workspace.LanguagePython, notebookExportFormat: workspace.ExportFormatSource, notebookPath: "/dir/foo", @@ -45,7 +45,31 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.py resolve to the same name /foo.py. Changing the name of one of these objects will resolve this issue", }, { - name: "python jupyter notebook and file", + name: "scala source notebook and file with source extension", + language: workspace.LanguageScala, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.scala", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.scala resolve to the same name /foo.scala. 
Changing the name of one of these objects will resolve this issue", + }, + { + name: "r source notebook and file with source extension", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.r", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.r resolve to the same name /foo.r. Changing the name of one of these objects will resolve this issue", + }, + { + name: "sql source notebook and file with source extension", + language: workspace.LanguageSql, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.sql", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.sql resolve to the same name /foo.sql. Changing the name of one of these objects will resolve this issue", + }, + { + name: "python jupyter notebook and file with source extension", language: workspace.LanguagePython, notebookExportFormat: workspace.ExportFormatJupyter, notebookPath: "/dir/foo", @@ -54,37 +78,64 @@ func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { expectedError: "", }, { - name: "scala source notebook and file", + name: "scala jupyter notebook and file with source extension", language: workspace.LanguageScala, - notebookExportFormat: workspace.ExportFormatSource, + notebookExportFormat: workspace.ExportFormatJupyter, notebookPath: "/dir/foo", filePath: "/dir/foo.scala", - expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.scala resolve to the same name /foo.scala. Changing the name of one of these objects will resolve this issue", + // Jupyter notebooks would correspond to foo.ipynb so an error is not expected. + expectedError: "", }, { - name: "r source notebook and file", - language: workspace.LanguageR, - notebookExportFormat: workspace.ExportFormatSource, - notebookPath: "/dir/foo", - filePath: "/dir/foo.r", - expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.r resolve to the same name /foo.r. Changing the name of one of these objects will resolve this issue", - }, - { - name: "sql source notebook and file", + name: "sql jupyter notebook and file with source extension", language: workspace.LanguageSql, - notebookExportFormat: workspace.ExportFormatSource, + notebookExportFormat: workspace.ExportFormatJupyter, notebookPath: "/dir/foo", filePath: "/dir/foo.sql", - expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.sql resolve to the same name /foo.sql. Changing the name of one of these objects will resolve this issue", + // Jupyter notebooks would correspond to foo.ipynb so an error is not expected. + expectedError: "", }, { - name: "python jupyter notebook and file", + name: "r jupyter notebook and file with source extension", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.sql", + // Jupyter notebooks would correspond to foo.ipynb so an error is not expected. 
+ expectedError: "", + }, + { + name: "python jupyter notebook and file with .ipynb extension", language: workspace.LanguagePython, notebookExportFormat: workspace.ExportFormatJupyter, notebookPath: "/dir/foo", filePath: "/dir/foo.ipynb", expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", }, + { + name: "scala jupyter notebook and file with .ipynb extension", + language: workspace.LanguageScala, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, + { + name: "r jupyter notebook and file with .ipynb extension", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, + { + name: "sql jupyter notebook and file with .ipynb extension", + language: workspace.LanguageSql, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, } { t.Run(tc.name, func(t *testing.T) { mockedWorkspaceClient := mocks.NewMockWorkspaceClient(t) diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index 582a88479..cd8680bfa 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -107,19 +107,19 @@ func DetectWithFS(fsys fs.FS, name string) (notebook bool, language workspace.La // Determine which header to expect based on filename extension. 
ext := strings.ToLower(filepath.Ext(name)) switch ext { - case ".py": + case ExtensionPython: header = `# Databricks notebook source` language = workspace.LanguagePython - case ".r": + case ExtensionR: header = `# Databricks notebook source` language = workspace.LanguageR - case ".scala": + case ExtensionScala: header = "// Databricks notebook source" language = workspace.LanguageScala - case ".sql": + case ExtensionSql: header = "-- Databricks notebook source" language = workspace.LanguageSql - case ".ipynb": + case ExtensionJupyter: return DetectJupyterWithFS(fsys, name) default: return false, "", nil diff --git a/libs/notebook/ext.go b/libs/notebook/ext.go index 28d08c11a..c34ad2cc9 100644 --- a/libs/notebook/ext.go +++ b/libs/notebook/ext.go @@ -2,22 +2,40 @@ package notebook import "github.com/databricks/databricks-sdk-go/service/workspace" +const ( + ExtensionNone string = "" + ExtensionPython string = ".py" + ExtensionR string = ".r" + ExtensionScala string = ".scala" + ExtensionSql string = ".sql" + ExtensionJupyter string = ".ipynb" +) + +var ExtensionToLanguage = map[string]workspace.Language{ + ExtensionPython: workspace.LanguagePython, + ExtensionR: workspace.LanguageR, + ExtensionScala: workspace.LanguageScala, + ExtensionSql: workspace.LanguageSql, + + // The platform supports all languages (Python, R, Scala, and SQL) for Jupyter notebooks. +} + func GetExtensionByLanguage(objectInfo *workspace.ObjectInfo) string { if objectInfo.ObjectType != workspace.ObjectTypeNotebook { - return "" + return ExtensionNone } switch objectInfo.Language { case workspace.LanguagePython: - return ".py" + return ExtensionPython case workspace.LanguageR: - return ".r" + return ExtensionR case workspace.LanguageScala: - return ".scala" + return ExtensionScala case workspace.LanguageSql: - return ".sql" + return ExtensionSql default: // Do not add any extension to the file name - return "" + return ExtensionNone } } From 21d27885dc4a82264bf3f74d4bbd598e223e6684 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 14 Nov 2024 14:00:15 +0100 Subject: [PATCH 13/42] Upgrade TF provider to 1.58.0 (#1900) ## Changes Notable changes: * Adds support for `restart_window` for pipelines. * Fix drift for pipelines where `catalog` contains uppercase characters. * Better error message if single-node job clusters are incorrectly configured. See: * https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.58.0 * https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.57.0 * https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.56.0 * https://github.com/databricks/terraform-provider-databricks/releases/tag/v1.55.0 ## Tests Integration tests pass. 
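For a concrete sense of what the new `restart_window` support maps to, here is a minimal sketch based on the generated schema struct added in this diff. The field names and JSON tags come from `ResourcePipelineRestartWindow`; the example values (and the `MONDAY` enum string) are assumptions for illustration, not taken from this PR:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Mirror of the generated ResourcePipelineRestartWindow struct from
// bundle/internal/tf/schema, copied here because that package is internal
// to the CLI and not importable directly.
type ResourcePipelineRestartWindow struct {
	DaysOfWeek string `json:"days_of_week,omitempty"`
	StartHour  int    `json:"start_hour"`
	TimeZoneId string `json:"time_zone_id,omitempty"`
}

func main() {
	// A window that allows pipeline restarts on Mondays starting at 06:00.
	rw := ResourcePipelineRestartWindow{
		DaysOfWeek: "MONDAY", // assumed enum value
		StartHour:  6,
		TimeZoneId: "UTC",
	}
	b, err := json.Marshal(rw)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// Output: {"days_of_week":"MONDAY","start_hour":6,"time_zone_id":"UTC"}
}
```

This is the shape the provider expects for the `restart_window` block nested in the pipeline resource.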
--- bundle/internal/tf/codegen/schema/version.go | 2 +- .../tf/schema/data_source_functions.go | 98 +++++++++++++++++++ .../schema/data_source_storage_credential.go | 1 + .../internal/tf/schema/data_source_volumes.go | 1 - bundle/internal/tf/schema/data_sources.go | 2 + bundle/internal/tf/schema/resource_alert.go | 46 +++++++++ .../schema/resource_custom_app_integration.go | 23 +++++ bundle/internal/tf/schema/resource_library.go | 18 ++-- .../internal/tf/schema/resource_pipeline.go | 8 ++ bundle/internal/tf/schema/resource_query.go | 84 ++++++++++++++++ bundle/internal/tf/schema/resources.go | 6 ++ bundle/internal/tf/schema/root.go | 2 +- 12 files changed, 279 insertions(+), 12 deletions(-) create mode 100644 bundle/internal/tf/schema/data_source_functions.go create mode 100644 bundle/internal/tf/schema/resource_alert.go create mode 100644 bundle/internal/tf/schema/resource_custom_app_integration.go create mode 100644 bundle/internal/tf/schema/resource_query.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 0c4244089..cfbc46c08 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.54.0" +const ProviderVersion = "1.58.0" diff --git a/bundle/internal/tf/schema/data_source_functions.go b/bundle/internal/tf/schema/data_source_functions.go new file mode 100644 index 000000000..6085d7522 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_functions.go @@ -0,0 +1,98 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceFunctionsFunctionsInputParamsParameters struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name"` + ParameterDefault string `json:"parameter_default,omitempty"` + ParameterMode string `json:"parameter_mode,omitempty"` + ParameterType string `json:"parameter_type,omitempty"` + Position int `json:"position"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text"` +} + +type DataSourceFunctionsFunctionsInputParams struct { + Parameters []DataSourceFunctionsFunctionsInputParamsParameters `json:"parameters,omitempty"` +} + +type DataSourceFunctionsFunctionsReturnParamsParameters struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name"` + ParameterDefault string `json:"parameter_default,omitempty"` + ParameterMode string `json:"parameter_mode,omitempty"` + ParameterType string `json:"parameter_type,omitempty"` + Position int `json:"position"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text"` +} + +type DataSourceFunctionsFunctionsReturnParams struct { + Parameters []DataSourceFunctionsFunctionsReturnParamsParameters `json:"parameters,omitempty"` +} + +type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction struct { + FunctionFullName string `json:"function_full_name"` +} + +type DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable struct { + TableFullName string `json:"table_full_name"` +} + +type 
DataSourceFunctionsFunctionsRoutineDependenciesDependencies struct { + Function []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesFunction `json:"function,omitempty"` + Table []DataSourceFunctionsFunctionsRoutineDependenciesDependenciesTable `json:"table,omitempty"` +} + +type DataSourceFunctionsFunctionsRoutineDependencies struct { + Dependencies []DataSourceFunctionsFunctionsRoutineDependenciesDependencies `json:"dependencies,omitempty"` +} + +type DataSourceFunctionsFunctions struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataType string `json:"data_type,omitempty"` + ExternalLanguage string `json:"external_language,omitempty"` + ExternalName string `json:"external_name,omitempty"` + FullDataType string `json:"full_data_type,omitempty"` + FullName string `json:"full_name,omitempty"` + FunctionId string `json:"function_id,omitempty"` + IsDeterministic bool `json:"is_deterministic,omitempty"` + IsNullCall bool `json:"is_null_call,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ParameterStyle string `json:"parameter_style,omitempty"` + Properties string `json:"properties,omitempty"` + RoutineBody string `json:"routine_body,omitempty"` + RoutineDefinition string `json:"routine_definition,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + SecurityType string `json:"security_type,omitempty"` + SpecificName string `json:"specific_name,omitempty"` + SqlDataAccess string `json:"sql_data_access,omitempty"` + SqlPath string `json:"sql_path,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + InputParams []DataSourceFunctionsFunctionsInputParams `json:"input_params,omitempty"` + ReturnParams []DataSourceFunctionsFunctionsReturnParams `json:"return_params,omitempty"` + RoutineDependencies []DataSourceFunctionsFunctionsRoutineDependencies `json:"routine_dependencies,omitempty"` +} + +type DataSourceFunctions struct { + CatalogName string `json:"catalog_name"` + IncludeBrowse bool `json:"include_browse,omitempty"` + SchemaName string `json:"schema_name"` + Functions []DataSourceFunctionsFunctions `json:"functions,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go index bf58f2726..95c1afcd4 100644 --- a/bundle/internal/tf/schema/data_source_storage_credential.go +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -35,6 +35,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct { Comment string `json:"comment,omitempty"` CreatedAt int `json:"created_at,omitempty"` CreatedBy string `json:"created_by,omitempty"` + FullName string `json:"full_name,omitempty"` Id string `json:"id,omitempty"` IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_volumes.go b/bundle/internal/tf/schema/data_source_volumes.go index 07bf59338..cafc9e68a 100644 --- a/bundle/internal/tf/schema/data_source_volumes.go +++ b/bundle/internal/tf/schema/data_source_volumes.go @@ -4,7 +4,6 @@ package schema type DataSourceVolumes struct { CatalogName string `json:"catalog_name"` - Id string `json:"id,omitempty"` Ids []string 
`json:"ids,omitempty"` SchemaName string `json:"schema_name"` } diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index 050e0bc1d..e32609b0f 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -21,6 +21,7 @@ type DataSources struct { Directory map[string]any `json:"databricks_directory,omitempty"` ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` + Functions map[string]any `json:"databricks_functions,omitempty"` Group map[string]any `json:"databricks_group,omitempty"` InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` @@ -79,6 +80,7 @@ func NewDataSources() *DataSources { Directory: make(map[string]any), ExternalLocation: make(map[string]any), ExternalLocations: make(map[string]any), + Functions: make(map[string]any), Group: make(map[string]any), InstancePool: make(map[string]any), InstanceProfiles: make(map[string]any), diff --git a/bundle/internal/tf/schema/resource_alert.go b/bundle/internal/tf/schema/resource_alert.go new file mode 100644 index 000000000..c539d5fe4 --- /dev/null +++ b/bundle/internal/tf/schema/resource_alert.go @@ -0,0 +1,46 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAlertConditionOperandColumn struct { + Name string `json:"name"` +} + +type ResourceAlertConditionOperand struct { + Column *ResourceAlertConditionOperandColumn `json:"column,omitempty"` +} + +type ResourceAlertConditionThresholdValue struct { + BoolValue bool `json:"bool_value,omitempty"` + DoubleValue int `json:"double_value,omitempty"` + StringValue string `json:"string_value,omitempty"` +} + +type ResourceAlertConditionThreshold struct { + Value *ResourceAlertConditionThresholdValue `json:"value,omitempty"` +} + +type ResourceAlertCondition struct { + EmptyResultState string `json:"empty_result_state,omitempty"` + Op string `json:"op"` + Operand *ResourceAlertConditionOperand `json:"operand,omitempty"` + Threshold *ResourceAlertConditionThreshold `json:"threshold,omitempty"` +} + +type ResourceAlert struct { + CreateTime string `json:"create_time,omitempty"` + CustomBody string `json:"custom_body,omitempty"` + CustomSubject string `json:"custom_subject,omitempty"` + DisplayName string `json:"display_name"` + Id string `json:"id,omitempty"` + LifecycleState string `json:"lifecycle_state,omitempty"` + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + OwnerUserName string `json:"owner_user_name,omitempty"` + ParentPath string `json:"parent_path,omitempty"` + QueryId string `json:"query_id"` + SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` + State string `json:"state,omitempty"` + TriggerTime string `json:"trigger_time,omitempty"` + UpdateTime string `json:"update_time,omitempty"` + Condition *ResourceAlertCondition `json:"condition,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_custom_app_integration.go b/bundle/internal/tf/schema/resource_custom_app_integration.go new file mode 100644 index 000000000..e89eb7fe5 --- /dev/null +++ b/bundle/internal/tf/schema/resource_custom_app_integration.go @@ -0,0 +1,23 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceCustomAppIntegrationTokenAccessPolicy struct { + AccessTokenTtlInMinutes int `json:"access_token_ttl_in_minutes,omitempty"` + RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"` +} + +type ResourceCustomAppIntegration struct { + ClientId string `json:"client_id,omitempty"` + ClientSecret string `json:"client_secret,omitempty"` + Confidential bool `json:"confidential,omitempty"` + CreateTime string `json:"create_time,omitempty"` + CreatedBy int `json:"created_by,omitempty"` + CreatorUsername string `json:"creator_username,omitempty"` + Id string `json:"id,omitempty"` + IntegrationId string `json:"integration_id,omitempty"` + Name string `json:"name,omitempty"` + RedirectUrls []string `json:"redirect_urls,omitempty"` + Scopes []string `json:"scopes,omitempty"` + TokenAccessPolicy *ResourceCustomAppIntegrationTokenAccessPolicy `json:"token_access_policy,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_library.go b/bundle/internal/tf/schema/resource_library.go index 385d992df..4fad7dbde 100644 --- a/bundle/internal/tf/schema/resource_library.go +++ b/bundle/internal/tf/schema/resource_library.go @@ -19,13 +19,13 @@ type ResourceLibraryPypi struct { } type ResourceLibrary struct { - ClusterId string `json:"cluster_id"` - Egg string `json:"egg,omitempty"` - Id string `json:"id,omitempty"` - Jar string `json:"jar,omitempty"` - Requirements string `json:"requirements,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceLibraryCran `json:"cran,omitempty"` - Maven *ResourceLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` + ClusterId string `json:"cluster_id"` + Egg string `json:"egg,omitempty"` + Id string `json:"id,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran []ResourceLibraryCran `json:"cran,omitempty"` + Maven []ResourceLibraryMaven `json:"maven,omitempty"` + Pypi []ResourceLibraryPypi `json:"pypi,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 2cb459aba..7238d24a8 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -137,6 +137,7 @@ type ResourcePipelineFilters struct { type ResourcePipelineGatewayDefinition struct { ConnectionId string `json:"connection_id,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` GatewayStorageName string `json:"gateway_storage_name,omitempty"` GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` @@ -242,6 +243,12 @@ type ResourcePipelineNotification struct { EmailRecipients []string `json:"email_recipients,omitempty"` } +type ResourcePipelineRestartWindow struct { + DaysOfWeek string `json:"days_of_week,omitempty"` + StartHour int `json:"start_hour"` + TimeZoneId string `json:"time_zone_id,omitempty"` +} + type ResourcePipelineTriggerCron struct { QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"` TimezoneId string `json:"timezone_id,omitempty"` @@ -288,5 +295,6 @@ type ResourcePipeline struct { LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"` Library []ResourcePipelineLibrary `json:"library,omitempty"` Notification []ResourcePipelineNotification `json:"notification,omitempty"` + RestartWindow *ResourcePipelineRestartWindow 
`json:"restart_window,omitempty"` Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_query.go b/bundle/internal/tf/schema/resource_query.go new file mode 100644 index 000000000..dc8e517ce --- /dev/null +++ b/bundle/internal/tf/schema/resource_query.go @@ -0,0 +1,84 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceQueryParameterDateRangeValueDateRangeValue struct { + End string `json:"end"` + Start string `json:"start"` +} + +type ResourceQueryParameterDateRangeValue struct { + DynamicDateRangeValue string `json:"dynamic_date_range_value,omitempty"` + Precision string `json:"precision,omitempty"` + StartDayOfWeek int `json:"start_day_of_week,omitempty"` + DateRangeValue *ResourceQueryParameterDateRangeValueDateRangeValue `json:"date_range_value,omitempty"` +} + +type ResourceQueryParameterDateValue struct { + DateValue string `json:"date_value,omitempty"` + DynamicDateValue string `json:"dynamic_date_value,omitempty"` + Precision string `json:"precision,omitempty"` +} + +type ResourceQueryParameterEnumValueMultiValuesOptions struct { + Prefix string `json:"prefix,omitempty"` + Separator string `json:"separator,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +type ResourceQueryParameterEnumValue struct { + EnumOptions string `json:"enum_options,omitempty"` + Values []string `json:"values,omitempty"` + MultiValuesOptions *ResourceQueryParameterEnumValueMultiValuesOptions `json:"multi_values_options,omitempty"` +} + +type ResourceQueryParameterNumericValue struct { + Value int `json:"value"` +} + +type ResourceQueryParameterQueryBackedValueMultiValuesOptions struct { + Prefix string `json:"prefix,omitempty"` + Separator string `json:"separator,omitempty"` + Suffix string `json:"suffix,omitempty"` +} + +type ResourceQueryParameterQueryBackedValue struct { + QueryId string `json:"query_id"` + Values []string `json:"values,omitempty"` + MultiValuesOptions *ResourceQueryParameterQueryBackedValueMultiValuesOptions `json:"multi_values_options,omitempty"` +} + +type ResourceQueryParameterTextValue struct { + Value string `json:"value"` +} + +type ResourceQueryParameter struct { + Name string `json:"name"` + Title string `json:"title,omitempty"` + DateRangeValue *ResourceQueryParameterDateRangeValue `json:"date_range_value,omitempty"` + DateValue *ResourceQueryParameterDateValue `json:"date_value,omitempty"` + EnumValue *ResourceQueryParameterEnumValue `json:"enum_value,omitempty"` + NumericValue *ResourceQueryParameterNumericValue `json:"numeric_value,omitempty"` + QueryBackedValue *ResourceQueryParameterQueryBackedValue `json:"query_backed_value,omitempty"` + TextValue *ResourceQueryParameterTextValue `json:"text_value,omitempty"` +} + +type ResourceQuery struct { + ApplyAutoLimit bool `json:"apply_auto_limit,omitempty"` + Catalog string `json:"catalog,omitempty"` + CreateTime string `json:"create_time,omitempty"` + Description string `json:"description,omitempty"` + DisplayName string `json:"display_name"` + Id string `json:"id,omitempty"` + LastModifierUserName string `json:"last_modifier_user_name,omitempty"` + LifecycleState string `json:"lifecycle_state,omitempty"` + OwnerUserName string `json:"owner_user_name,omitempty"` + ParentPath string `json:"parent_path,omitempty"` + QueryText string `json:"query_text"` + RunAsMode string `json:"run_as_mode,omitempty"` + Schema string `json:"schema,omitempty"` + Tags []string `json:"tags,omitempty"` + UpdateTime string 
`json:"update_time,omitempty"` + WarehouseId string `json:"warehouse_id"` + Parameter []ResourceQueryParameter `json:"parameter,omitempty"` +} diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 53f558df6..ea5b618fd 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -4,6 +4,7 @@ package schema type Resources struct { AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + Alert map[string]any `json:"databricks_alert,omitempty"` ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` @@ -17,6 +18,7 @@ type Resources struct { ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` Connection map[string]any `json:"databricks_connection,omitempty"` + CustomAppIntegration map[string]any `json:"databricks_custom_app_integration,omitempty"` Dashboard map[string]any `json:"databricks_dashboard,omitempty"` DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` @@ -68,6 +70,7 @@ type Resources struct { Pipeline map[string]any `json:"databricks_pipeline,omitempty"` Provider map[string]any `json:"databricks_provider,omitempty"` QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"` + Query map[string]any `json:"databricks_query,omitempty"` Recipient map[string]any `json:"databricks_recipient,omitempty"` RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` Repo map[string]any `json:"databricks_repo,omitempty"` @@ -107,6 +110,7 @@ type Resources struct { func NewResources() *Resources { return &Resources{ AccessControlRuleSet: make(map[string]any), + Alert: make(map[string]any), ArtifactAllowlist: make(map[string]any), AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), AwsS3Mount: make(map[string]any), @@ -120,6 +124,7 @@ func NewResources() *Resources { ClusterPolicy: make(map[string]any), ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), Connection: make(map[string]any), + CustomAppIntegration: make(map[string]any), Dashboard: make(map[string]any), DbfsFile: make(map[string]any), DefaultNamespaceSetting: make(map[string]any), @@ -171,6 +176,7 @@ func NewResources() *Resources { Pipeline: make(map[string]any), Provider: make(map[string]any), QualityMonitor: make(map[string]any), + Query: make(map[string]any), Recipient: make(map[string]any), RegisteredModel: make(map[string]any), Repo: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index bf4283c9b..7ccb7a0f0 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.54.0" +const ProviderVersion = "1.58.0" func NewRoot() *Root { return &Root{ From 2edfb6cfeaba2b4a835590c5e3df78bb78cafe27 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 14 Nov 2024 15:38:52 +0100 Subject: [PATCH 14/42] [Release] Release v0.234.0 (#1902) Bundles: * Do not 
execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)). * Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)). API Changes: * Added `databricks credentials` command group. * Changed `databricks lakeview create` command with new required argument order. OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07) Dependency updates: * Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)). * Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)). * Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)). * Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)). * Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)). * Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)). --- CHANGELOG.md | 19 +++++++++++++++++++ 1 file changed, 19 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 9b08d7514..e5b6496bd 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,24 @@ # Version changelog +## [Release] Release v0.234.0 + +Bundles: + * Do not execute build on bundle destroy ([#1882](https://github.com/databricks/cli/pull/1882)). + * Add support for non-Python ipynb notebooks to DABs ([#1827](https://github.com/databricks/cli/pull/1827)). + +API Changes: + * Added `databricks credentials` command group. + * Changed `databricks lakeview create` command with new required argument order. + +OpenAPI commit d25296d2f4aa7bd6195c816fdf82e0f960f775da (2024-11-07) +Dependency updates: + * Upgrade TF provider to 1.58.0 ([#1900](https://github.com/databricks/cli/pull/1900)). + * Bump golang.org/x/sync from 0.8.0 to 0.9.0 ([#1892](https://github.com/databricks/cli/pull/1892)). + * Bump golang.org/x/text from 0.19.0 to 0.20.0 ([#1893](https://github.com/databricks/cli/pull/1893)). + * Bump golang.org/x/mod from 0.21.0 to 0.22.0 ([#1895](https://github.com/databricks/cli/pull/1895)). + * Bump golang.org/x/oauth2 from 0.23.0 to 0.24.0 ([#1894](https://github.com/databricks/cli/pull/1894)). + * Bump github.com/databricks/databricks-sdk-go from 0.49.0 to 0.51.0 ([#1878](https://github.com/databricks/cli/pull/1878)). + ## [Release] Release v0.233.0 CLI: From 1508d65c4cec06c697060b5ea143a835f5ec0f24 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 14 Nov 2024 17:10:45 +0100 Subject: [PATCH 15/42] Extract functionality to detect if the CLI is running on DBR (#1889) ## Changes Whether or not the CLI is running on DBR can be detected once and stored in the command's context. By storing it in the context, it can easily be mocked for testing. This builds on the simpler approach and conversation in #1744. It unblocks testing of the DBR-specific paths while not compromising on the checks we can perform to test if the CLI is running on DBR. 
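As a usage sketch (not part of this diff): the package added below exposes `dbr.DetectRuntime`, `dbr.RunsOnRuntime`, and `dbr.MockRuntime`, and a caller wires them together roughly as follows.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/dbr"
)

func main() {
	// Production path: run detection once, near the entrypoint, and
	// cache the result on the context.
	ctx := dbr.DetectRuntime(context.Background())

	// Downstream code reads the cached result instead of re-detecting.
	fmt.Println("on DBR:", dbr.RunsOnRuntime(ctx))

	// Test path: skip detection and force a value. Both DetectRuntime
	// and MockRuntime panic when called twice on the same context.
	mocked := dbr.MockRuntime(context.Background(), true)
	fmt.Println("mocked:", dbr.RunsOnRuntime(mocked))
}
```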
## Tests * Unit tests for the new `dbr` package * New unit test for the `ConfigureWSFS` mutator --- bundle/config/mutator/configure_wsfs.go | 6 +- bundle/config/mutator/configure_wsfs_test.go | 65 +++++++++++++++ cmd/root/root.go | 4 + libs/dbr/context.go | 49 ++++++++++++ libs/dbr/context_test.go | 59 ++++++++++++++ libs/dbr/detect.go | 35 +++++++++ libs/dbr/detect_test.go | 83 ++++++++++++++++++++ libs/fakefs/fakefs.go | 55 +++++++++++++ 8 files changed, 352 insertions(+), 4 deletions(-) create mode 100644 bundle/config/mutator/configure_wsfs_test.go create mode 100644 libs/dbr/context.go create mode 100644 libs/dbr/context_test.go create mode 100644 libs/dbr/detect.go create mode 100644 libs/dbr/detect_test.go create mode 100644 libs/fakefs/fakefs.go diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go index 1d1bec582..110e1a381 100644 --- a/bundle/config/mutator/configure_wsfs.go +++ b/bundle/config/mutator/configure_wsfs.go @@ -5,14 +5,12 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/vfs" ) -const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION" - type configureWSFS struct{} func ConfigureWSFS() bundle.Mutator { @@ -32,7 +30,7 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno } // The executable must be running on DBR. - if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok { + if !dbr.RunsOnRuntime(ctx) { return nil } diff --git a/bundle/config/mutator/configure_wsfs_test.go b/bundle/config/mutator/configure_wsfs_test.go new file mode 100644 index 000000000..6f76293e0 --- /dev/null +++ b/bundle/config/mutator/configure_wsfs_test.go @@ -0,0 +1,65 @@ +package mutator_test + +import ( + "context" + "runtime" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/stretchr/testify/assert" +) + +func mockBundleForConfigureWSFS(t *testing.T, syncRootPath string) *bundle.Bundle { + // The native path of the sync root on Windows will never match the /Workspace prefix, + // so the test case for nominal behavior will always fail. 
+ if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows") + } + + b := &bundle.Bundle{ + SyncRoot: vfs.MustNew(syncRootPath), + } + + w := mocks.NewMockWorkspaceClient(t) + w.WorkspaceClient.Config = &config.Config{} + b.SetWorkpaceClient(w.WorkspaceClient) + + return b +} + +func TestConfigureWSFS_SkipsIfNotWorkspacePrefix(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.Equal(t, originalSyncRoot, b.SyncRoot) +} + +func TestConfigureWSFS_SkipsIfNotRunningOnRuntime(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/Workspace/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + ctx = dbr.MockRuntime(ctx, false) + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.Equal(t, originalSyncRoot, b.SyncRoot) +} + +func TestConfigureWSFS_SwapSyncRoot(t *testing.T) { + b := mockBundleForConfigureWSFS(t, "/Workspace/foo") + originalSyncRoot := b.SyncRoot + + ctx := context.Background() + ctx = dbr.MockRuntime(ctx, true) + diags := bundle.Apply(ctx, b, mutator.ConfigureWSFS()) + assert.Empty(t, diags) + assert.NotEqual(t, originalSyncRoot, b.SyncRoot) +} diff --git a/cmd/root/root.go b/cmd/root/root.go index 7059586f3..e6f66f126 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/log" "github.com/spf13/cobra" ) @@ -73,6 +74,9 @@ func New(ctx context.Context) *cobra.Command { // get the context back ctx = cmd.Context() + // Detect if the CLI is running on DBR and store this on the context. + ctx = dbr.DetectRuntime(ctx) + // Configure our user agent with the command that's about to be executed. ctx = withCommandInUserAgent(ctx, cmd) ctx = withCommandExecIdInUserAgent(ctx) diff --git a/libs/dbr/context.go b/libs/dbr/context.go new file mode 100644 index 000000000..7512c0fe2 --- /dev/null +++ b/libs/dbr/context.go @@ -0,0 +1,49 @@ +package dbr + +import "context" + +// key is a package-local type to use for context keys. +// +// Using an unexported type for context keys prevents key collisions across +// packages since external packages cannot create values of this type. +type key int + +const ( + // dbrKey is the context key for the detection result. + // The value of 1 is arbitrary and can be any number. + // Other keys in the same package must have different values. + dbrKey = key(1) ) + +// DetectRuntime detects whether or not the current +// process is running inside a Databricks Runtime environment. +// It returns a new context with the detection result set. +func DetectRuntime(ctx context.Context) context.Context { + if v := ctx.Value(dbrKey); v != nil { + panic("dbr.DetectRuntime called twice on the same context") + } + return context.WithValue(ctx, dbrKey, detect(ctx)) +} + +// MockRuntime is a helper function to mock the detection result. +// It returns a new context with the detection result set. +func MockRuntime(ctx context.Context, b bool) context.Context { + if v := ctx.Value(dbrKey); v != nil { + panic("dbr.MockRuntime called twice on the same context") + } + return context.WithValue(ctx, dbrKey, b) +} + +// RunsOnRuntime returns the detection result from the context. +// It expects a context returned by [DetectRuntime] or [MockRuntime]. 
+// +// We store this value in a context to avoid having to use either +// a global variable, passing a boolean around everywhere, or +// performing the same detection multiple times. +func RunsOnRuntime(ctx context.Context) bool { + v := ctx.Value(dbrKey) + if v == nil { + panic("dbr.RunsOnRuntime called without calling dbr.DetectRuntime first") + } + return v.(bool) +} diff --git a/libs/dbr/context_test.go b/libs/dbr/context_test.go new file mode 100644 index 000000000..fc53cf130 --- /dev/null +++ b/libs/dbr/context_test.go @@ -0,0 +1,59 @@ +package dbr + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestContext_DetectRuntimePanics(t *testing.T) { + ctx := context.Background() + + // Run detection. + ctx = DetectRuntime(ctx) + + // Expect a panic if the detection is run twice. + assert.Panics(t, func() { + ctx = DetectRuntime(ctx) + }) +} + +func TestContext_MockRuntimePanics(t *testing.T) { + ctx := context.Background() + + // Run detection. + ctx = MockRuntime(ctx, true) + + // Expect a panic if the mock function is run twice. + assert.Panics(t, func() { + MockRuntime(ctx, true) + }) +} + +func TestContext_RunsOnRuntimePanics(t *testing.T) { + ctx := context.Background() + + // Expect a panic if the detection is not run. + assert.Panics(t, func() { + RunsOnRuntime(ctx) + }) +} + +func TestContext_RunsOnRuntime(t *testing.T) { + ctx := context.Background() + + // Run detection. + ctx = DetectRuntime(ctx) + + // Expect no panic because detection has run. + assert.NotPanics(t, func() { + RunsOnRuntime(ctx) + }) +} + +func TestContext_RunsOnRuntimeWithMock(t *testing.T) { + ctx := context.Background() + assert.True(t, RunsOnRuntime(MockRuntime(ctx, true))) + assert.False(t, RunsOnRuntime(MockRuntime(ctx, false))) +} diff --git a/libs/dbr/detect.go b/libs/dbr/detect.go new file mode 100644 index 000000000..d8b4dfe20 --- /dev/null +++ b/libs/dbr/detect.go @@ -0,0 +1,35 @@ +package dbr + +import ( + "context" + "os" + "runtime" + + "github.com/databricks/cli/libs/env" +) + +// Dereference [os.Stat] to allow mocking in tests. +var statFunc = os.Stat + +// detect returns true if the current process is running on a Databricks Runtime. +// Its return value is meant to be cached in the context. +func detect(ctx context.Context) bool { + // Databricks Runtime implies Linux. + // Return early on other operating systems. + if runtime.GOOS != "linux" { + return false + } + + // Databricks Runtime always has the DATABRICKS_RUNTIME_VERSION environment variable set. + if value, ok := env.Lookup(ctx, "DATABRICKS_RUNTIME_VERSION"); !ok || value == "" { + return false + } + + // Expect to see a "/databricks" directory. + if fi, err := statFunc("/databricks"); err != nil || !fi.IsDir() { + return false + } + + // All checks passed. 
+ return true +} diff --git a/libs/dbr/detect_test.go b/libs/dbr/detect_test.go new file mode 100644 index 000000000..3a4a43a73 --- /dev/null +++ b/libs/dbr/detect_test.go @@ -0,0 +1,83 @@ +package dbr + +import ( + "context" + "io/fs" + "runtime" + "testing" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/fakefs" + "github.com/stretchr/testify/assert" +) + +func requireLinux(t *testing.T) { + if runtime.GOOS != "linux" { + t.Skipf("skipping test on %s", runtime.GOOS) + } +} + +func configureStatFunc(t *testing.T, fi fs.FileInfo, err error) { + originalFunc := statFunc + statFunc = func(name string) (fs.FileInfo, error) { + assert.Equal(t, "/databricks", name) + return fi, err + } + + t.Cleanup(func() { + statFunc = originalFunc + }) +} + +func TestDetect_NotLinux(t *testing.T) { + if runtime.GOOS == "linux" { + t.Skip("skipping test on Linux OS") + } + + ctx := context.Background() + assert.False(t, detect(ctx)) +} + +func TestDetect_Env(t *testing.T) { + requireLinux(t) + + // Configure other checks to pass. + configureStatFunc(t, fakefs.FileInfo{FakeDir: true}, nil) + + t.Run("empty", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "") + assert.False(t, detect(ctx)) + }) + + t.Run("non-empty cluster", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "15.4") + assert.True(t, detect(ctx)) + }) + + t.Run("non-empty serverless", func(t *testing.T) { + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "client.1.13") + assert.True(t, detect(ctx)) + }) +} + +func TestDetect_Stat(t *testing.T) { + requireLinux(t) + + // Configure other checks to pass. + ctx := env.Set(context.Background(), "DATABRICKS_RUNTIME_VERSION", "non-empty") + + t.Run("error", func(t *testing.T) { + configureStatFunc(t, nil, fs.ErrNotExist) + assert.False(t, detect(ctx)) + }) + + t.Run("not a directory", func(t *testing.T) { + configureStatFunc(t, fakefs.FileInfo{}, nil) + assert.False(t, detect(ctx)) + }) + + t.Run("directory", func(t *testing.T) { + configureStatFunc(t, fakefs.FileInfo{FakeDir: true}, nil) + assert.True(t, detect(ctx)) + }) +} diff --git a/libs/fakefs/fakefs.go b/libs/fakefs/fakefs.go new file mode 100644 index 000000000..2f4756970 --- /dev/null +++ b/libs/fakefs/fakefs.go @@ -0,0 +1,55 @@ +package fakefs + +import ( + "io/fs" + "time" +) + +// DirEntry is a fake implementation of [fs.DirEntry]. +type DirEntry struct { + FileInfo +} + +func (entry DirEntry) Type() fs.FileMode { + typ := fs.ModePerm + if entry.FakeDir { + typ |= fs.ModeDir + } + return typ +} + +func (entry DirEntry) Info() (fs.FileInfo, error) { + return entry.FileInfo, nil +} + +// FileInfo is a fake implementation of [fs.FileInfo]. 
+type FileInfo struct { + FakeName string + FakeSize int64 + FakeDir bool + FakeMode fs.FileMode +} + +func (info FileInfo) Name() string { + return info.FakeName +} + +func (info FileInfo) Size() int64 { + return info.FakeSize +} + +func (info FileInfo) Mode() fs.FileMode { + return info.FakeMode +} + +func (info FileInfo) ModTime() time.Time { + return time.Now() +} + +func (info FileInfo) IsDir() bool { + return info.FakeDir +} + +func (info FileInfo) Sys() any { + return nil +} From 1db384018c5efc6c7b1a9a43d5f1268c97ddd58d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 14 Nov 2024 18:39:38 +0100 Subject: [PATCH 16/42] Make `TableName` field part of quality monitor schema (#1903) ## Changes This field was special-cased in #1307 because it's not part of the JSON payload in the SDK struct. This approach, while pragmatic, meant it didn't show up in the JSON schema. While debugging an issue with quality monitors in #1900, I couldn't figure out why I was getting schema errors on this field, or how it was passed through to the TF representation. This commit removes the special case and makes it behave like everything else. ## Tests * Unit tests pass. * Confirmed that the updated schema failed validation before this change. --- bundle/config/mutator/initialize_urls_test.go | 5 ++--- .../config/mutator/process_target_mode_test.go | 17 ++++++++++++----- bundle/config/resources/quality_monitor.go | 16 +++++++--------- .../tfdyn/convert_quality_monitor_test.go | 2 +- .../schema/testdata/pass/quality_monitor.yml | 1 + bundle/schema/jsonschema.json | 4 ++++ libs/dyn/convert/struct_info.go | 9 --------- 7 files changed, 27 insertions(+), 27 deletions(-) diff --git a/bundle/config/mutator/initialize_urls_test.go b/bundle/config/mutator/initialize_urls_test.go index 16b67dac8..ec4e790c4 100644 --- a/bundle/config/mutator/initialize_urls_test.go +++ b/bundle/config/mutator/initialize_urls_test.go @@ -65,9 +65,8 @@ func TestInitializeURLs(t *testing.T) { }, QualityMonitors: map[string]*resources.QualityMonitor{ "qualityMonitor1": { - CreateMonitor: &catalog.CreateMonitor{ - TableName: "catalog.schema.qualityMonitor1", - }, + TableName: "catalog.schema.qualityMonitor1", + CreateMonitor: &catalog.CreateMonitor{}, }, }, Schemas: map[string]*resources.Schema{ diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index b694f627a..4135d5fdf 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -102,16 +102,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle { "registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}}, }, QualityMonitors: map[string]*resources.QualityMonitor{ - "qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}}, - "qualityMonitor2": { + "qualityMonitor1": { + TableName: "qualityMonitor1", CreateMonitor: &catalog.CreateMonitor{ - TableName: "qualityMonitor2", - Schedule: &catalog.MonitorCronSchedule{}, + OutputSchemaName: "catalog.schema", + }, + }, + "qualityMonitor2": { + TableName: "qualityMonitor2", + CreateMonitor: &catalog.CreateMonitor{ + OutputSchemaName: "catalog.schema", + Schedule: &catalog.MonitorCronSchedule{}, }, }, "qualityMonitor3": { + TableName: "qualityMonitor3", CreateMonitor: &catalog.CreateMonitor{ - TableName: "qualityMonitor3", + OutputSchemaName: "catalog.schema", Schedule: &catalog.MonitorCronSchedule{ PauseStatus: 
catalog.MonitorCronSchedulePauseStatusUnpaused, }, diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 3c823e625..30ec4f918 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -13,17 +13,15 @@ import ( ) type QualityMonitor struct { - // Represents the Input Arguments for Terraform and will get - // converted to a HCL representation for CRUD - *catalog.CreateMonitor - - // This represents the id which is the full name of the monitor - // (catalog_name.schema_name.table_name) that can be used - // as a reference in other resources. This value is returned by terraform. - ID string `json:"id,omitempty" bundle:"readonly"` - + ID string `json:"id,omitempty" bundle:"readonly"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` URL string `json:"url,omitempty" bundle:"internal"` + + // The table name is a required field but not included as a JSON field in [catalog.CreateMonitor]. + TableName string `json:"table_name"` + + // This struct defines the creation payload for a monitor. + *catalog.CreateMonitor } func (s *QualityMonitor) UnmarshalJSON(b []byte) error { diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go index 50bfce7a0..f71abf43c 100644 --- a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -15,8 +15,8 @@ import ( func TestConvertQualityMonitor(t *testing.T) { var src = resources.QualityMonitor{ + TableName: "test_table_name", CreateMonitor: &catalog.CreateMonitor{ - TableName: "test_table_name", AssetsDir: "assets_dir", OutputSchemaName: "output_schema_name", InferenceLog: &catalog.MonitorInferenceLog{ diff --git a/bundle/internal/schema/testdata/pass/quality_monitor.yml b/bundle/internal/schema/testdata/pass/quality_monitor.yml index a9be59329..79c4dd69b 100644 --- a/bundle/internal/schema/testdata/pass/quality_monitor.yml +++ b/bundle/internal/schema/testdata/pass/quality_monitor.yml @@ -4,6 +4,7 @@ bundle: resources: quality_monitors: myqualitymonitor: + table_name: catalog.schema.quality_monitor inference_log: granularities: - a diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index dc0d7f953..703daafeb 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -684,6 +684,9 @@ "description": "Configuration for monitoring snapshot tables.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorSnapshot" }, + "table_name": { + "$ref": "#/$defs/string" + }, "time_series": { "description": "Configuration for monitoring time series tables.", "$ref": "#/$defs/github.com/databricks/databricks-sdk-go/service/catalog.MonitorTimeSeries" @@ -695,6 +698,7 @@ }, "additionalProperties": false, "required": [ + "table_name", "assets_dir", "output_schema_name" ] diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index 595e52edd..dc3ed4da4 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -6,7 +6,6 @@ import ( "sync" "github.com/databricks/cli/libs/dyn" - "github.com/databricks/cli/libs/textutil" ) // structInfo holds the type information we need to efficiently @@ -85,14 +84,6 @@ func buildStructInfo(typ reflect.Type) structInfo { } name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") - if typ.Name() == "QualityMonitor" && name == "-" { - urlName, _, _ := 
strings.Cut(sf.Tag.Get("url"), ",") - if urlName == "" || urlName == "-" { - name = textutil.CamelToSnakeCase(sf.Name) - } else { - name = urlName - } - } if name == "" || name == "-" { continue } From 7f3fb10c4ac92d4e53b6e313dc87c31873c3c8ea Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 15 Nov 2024 16:03:59 +0100 Subject: [PATCH 17/42] Do not prepend paths starting with ~ or variable reference (#1905) ## Changes Fixes #1904 ## Tests Added regression test --- bundle/config/mutator/prepend_workspace_prefix.go | 5 +++++ bundle/config/mutator/prepend_workspace_prefix_test.go | 8 ++++++++ 2 files changed, 13 insertions(+) diff --git a/bundle/config/mutator/prepend_workspace_prefix.go b/bundle/config/mutator/prepend_workspace_prefix.go index de71bf7fd..e0be2572d 100644 --- a/bundle/config/mutator/prepend_workspace_prefix.go +++ b/bundle/config/mutator/prepend_workspace_prefix.go @@ -44,6 +44,11 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di return dyn.InvalidValue, fmt.Errorf("expected string, got %s", v.Kind()) } + // Skip prefixing if the path does not start with /, it might be variable reference or smth else. + if !strings.HasPrefix(path, "/") { + return pv, nil + } + for _, prefix := range skipPrefixes { if strings.HasPrefix(path, prefix) { return pv, nil diff --git a/bundle/config/mutator/prepend_workspace_prefix_test.go b/bundle/config/mutator/prepend_workspace_prefix_test.go index 6fbadec56..31393e6bd 100644 --- a/bundle/config/mutator/prepend_workspace_prefix_test.go +++ b/bundle/config/mutator/prepend_workspace_prefix_test.go @@ -31,6 +31,14 @@ func TestPrependWorkspacePrefix(t *testing.T) { path: "/Volumes/Users/test", expected: "/Volumes/Users/test", }, + { + path: "~/test", + expected: "~/test", + }, + { + path: "${workspace.file_path}/test", + expected: "${workspace.file_path}/test", + }, } for _, tc := range testCases { From 7d732ceba8c5229f5228762db0d123764e290ddd Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Fri, 15 Nov 2024 16:37:21 +0100 Subject: [PATCH 18/42] Consolidate test helpers for `io/fs` (#1906) ## Changes We had a number of copies of test helpers for `io/fs` in the repository. This change consolidates all of them to use the `libs/fakefs` package. ## Tests Unit tests pass. 
--- cmd/fs/helpers_test.go | 3 +- libs/fakefs/fakefs.go | 36 +++++++++- libs/fakefs/fakefs_test.go | 38 ++++++++++ libs/filer/completer/completer_test.go | 3 +- libs/filer/fake_filer.go | 60 +++------------- libs/filer/fake_filer_test.go | 98 ++++++++++++++++++++++++++ libs/filer/fs_test.go | 3 +- libs/notebook/detect_test.go | 21 ++++-- libs/notebook/fakefs_test.go | 77 -------------------- 9 files changed, 201 insertions(+), 138 deletions(-) create mode 100644 libs/fakefs/fakefs_test.go create mode 100644 libs/filer/fake_filer_test.go delete mode 100644 libs/notebook/fakefs_test.go diff --git a/cmd/fs/helpers_test.go b/cmd/fs/helpers_test.go index 10b4aa160..a01035cc7 100644 --- a/cmd/fs/helpers_test.go +++ b/cmd/fs/helpers_test.go @@ -7,6 +7,7 @@ import ( "testing" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/fakefs" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/spf13/cobra" @@ -84,7 +85,7 @@ func setupTest(t *testing.T) (*validArgs, *cobra.Command, *mocks.MockWorkspaceCl cmd, m := setupCommand(t) fakeFilerForPath := func(ctx context.Context, fullPath string) (filer.Filer, string, error) { - fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{ + fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{ "dir": {FakeName: "root", FakeDir: true}, "dir/dirA": {FakeDir: true}, "dir/dirB": {FakeDir: true}, diff --git a/libs/fakefs/fakefs.go b/libs/fakefs/fakefs.go index 2f4756970..a8d5eb873 100644 --- a/libs/fakefs/fakefs.go +++ b/libs/fakefs/fakefs.go @@ -1,18 +1,21 @@ package fakefs import ( + "fmt" "io/fs" "time" ) +var ErrNotImplemented = fmt.Errorf("not implemented") + // DirEntry is a fake implementation of [fs.DirEntry]. type DirEntry struct { - FileInfo + fs.FileInfo } func (entry DirEntry) Type() fs.FileMode { typ := fs.ModePerm - if entry.FakeDir { + if entry.IsDir() { typ |= fs.ModeDir } return typ @@ -53,3 +56,32 @@ func (info FileInfo) IsDir() bool { func (info FileInfo) Sys() any { return nil } + +// File is a fake implementation of [fs.File]. +type File struct { + FileInfo fs.FileInfo +} + +func (f File) Close() error { + return nil +} + +func (f File) Read(p []byte) (n int, err error) { + return 0, ErrNotImplemented +} + +func (f File) Stat() (fs.FileInfo, error) { + return f.FileInfo, nil +} + +// FS is a fake implementation of [fs.FS]. 
+type FS map[string]fs.File + +func (f FS) Open(name string) (fs.File, error) { + e, ok := f[name] + if !ok { + return nil, fs.ErrNotExist + } + + return e, nil +} diff --git a/libs/fakefs/fakefs_test.go b/libs/fakefs/fakefs_test.go new file mode 100644 index 000000000..b89190206 --- /dev/null +++ b/libs/fakefs/fakefs_test.go @@ -0,0 +1,38 @@ +package fakefs + +import ( + "io/fs" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestFile(t *testing.T) { + var fakefile fs.File = File{ + FileInfo: FileInfo{ + FakeName: "file", + }, + } + + _, err := fakefile.Read([]byte{}) + assert.ErrorIs(t, err, ErrNotImplemented) + + fi, err := fakefile.Stat() + assert.NoError(t, err) + assert.Equal(t, "file", fi.Name()) + + err = fakefile.Close() + assert.NoError(t, err) +} + +func TestFS(t *testing.T) { + var fakefs fs.FS = FS{ + "file": File{}, + } + + _, err := fakefs.Open("doesntexist") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = fakefs.Open("file") + assert.NoError(t, err) +} diff --git a/libs/filer/completer/completer_test.go b/libs/filer/completer/completer_test.go index c533f0b6c..d284447b9 100644 --- a/libs/filer/completer/completer_test.go +++ b/libs/filer/completer/completer_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/fakefs" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/spf13/cobra" @@ -17,7 +18,7 @@ func setupCompleter(t *testing.T, onlyDirs bool) *completer { // Needed to make type context.valueCtx for mockFilerForPath ctx = root.SetWorkspaceClient(ctx, mocks.NewMockWorkspaceClient(t).WorkspaceClient) - fakeFiler := filer.NewFakeFiler(map[string]filer.FakeFileInfo{ + fakeFiler := filer.NewFakeFiler(map[string]fakefs.FileInfo{ "dir": {FakeName: "root", FakeDir: true}, "dir/dirA": {FakeDir: true}, "dir/dirB": {FakeDir: true}, diff --git a/libs/filer/fake_filer.go b/libs/filer/fake_filer.go index 0e650ff60..76b8bcd94 100644 --- a/libs/filer/fake_filer.go +++ b/libs/filer/fake_filer.go @@ -8,58 +8,12 @@ import ( "path" "sort" "strings" - "time" + + "github.com/databricks/cli/libs/fakefs" ) -type FakeDirEntry struct { - FakeFileInfo -} - -func (entry FakeDirEntry) Type() fs.FileMode { - typ := fs.ModePerm - if entry.FakeDir { - typ |= fs.ModeDir - } - return typ -} - -func (entry FakeDirEntry) Info() (fs.FileInfo, error) { - return entry.FakeFileInfo, nil -} - -type FakeFileInfo struct { - FakeName string - FakeSize int64 - FakeDir bool - FakeMode fs.FileMode -} - -func (info FakeFileInfo) Name() string { - return info.FakeName -} - -func (info FakeFileInfo) Size() int64 { - return info.FakeSize -} - -func (info FakeFileInfo) Mode() fs.FileMode { - return info.FakeMode -} - -func (info FakeFileInfo) ModTime() time.Time { - return time.Now() -} - -func (info FakeFileInfo) IsDir() bool { - return info.FakeDir -} - -func (info FakeFileInfo) Sys() any { - return nil -} - type FakeFiler struct { - entries map[string]FakeFileInfo + entries map[string]fakefs.FileInfo } func (f *FakeFiler) Write(ctx context.Context, p string, reader io.Reader, mode ...WriteMode) error { @@ -97,7 +51,7 @@ func (f *FakeFiler) ReadDir(ctx context.Context, p string) ([]fs.DirEntry, error continue } - out = append(out, FakeDirEntry{v}) + out = append(out, fakefs.DirEntry{FileInfo: v}) } sort.Slice(out, func(i, j int) bool { return out[i].Name() < out[j].Name() }) @@ -117,7 +71,11 @@ func (f *FakeFiler) Stat(ctx context.Context, path string) (fs.FileInfo, error) return 
entry, nil } -func NewFakeFiler(entries map[string]FakeFileInfo) *FakeFiler { +// NewFakeFiler creates a new fake [Filer] instance with the given entries. +// It sets the [Name] field of each entry to the base name of the path. +// +// This is meant to be used in tests. +func NewFakeFiler(entries map[string]fakefs.FileInfo) *FakeFiler { fakeFiler := &FakeFiler{ entries: entries, } diff --git a/libs/filer/fake_filer_test.go b/libs/filer/fake_filer_test.go new file mode 100644 index 000000000..fb5364888 --- /dev/null +++ b/libs/filer/fake_filer_test.go @@ -0,0 +1,98 @@ +package filer + +import ( + "context" + "io" + "io/fs" + "testing" + + "github.com/databricks/cli/libs/fakefs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFakeFiler_Read(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "file": {}, + }) + + ctx := context.Background() + r, err := f.Read(ctx, "file") + require.NoError(t, err) + contents, err := io.ReadAll(r) + require.NoError(t, err) + + // Contents of every file is "foo". + assert.Equal(t, "foo", string(contents)) +} + +func TestFakeFiler_Read_NotFound(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "foo": {}, + }) + + ctx := context.Background() + _, err := f.Read(ctx, "bar") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestFakeFiler_ReadDir_NotFound(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "dir1": {FakeDir: true}, + }) + + ctx := context.Background() + _, err := f.ReadDir(ctx, "dir2") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestFakeFiler_ReadDir_NotADirectory(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "file": {}, + }) + + ctx := context.Background() + _, err := f.ReadDir(ctx, "file") + assert.ErrorIs(t, err, fs.ErrInvalid) +} + +func TestFakeFiler_ReadDir(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "dir1": {FakeDir: true}, + "dir1/file2": {}, + "dir1/dir2": {FakeDir: true}, + }) + + ctx := context.Background() + entries, err := f.ReadDir(ctx, "dir1/") + require.NoError(t, err) + require.Len(t, entries, 2) + + // The entries are sorted by name. 
+ assert.Equal(t, "dir2", entries[0].Name()) + assert.True(t, entries[0].IsDir()) + assert.Equal(t, "file2", entries[1].Name()) + assert.False(t, entries[1].IsDir()) +} + +func TestFakeFiler_Stat(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "file": {}, + }) + + ctx := context.Background() + info, err := f.Stat(ctx, "file") + require.NoError(t, err) + + assert.Equal(t, "file", info.Name()) +} + +func TestFakeFiler_Stat_NotFound(t *testing.T) { + f := NewFakeFiler(map[string]fakefs.FileInfo{ + "foo": {}, + }) + + ctx := context.Background() + _, err := f.Stat(ctx, "bar") + assert.ErrorIs(t, err, fs.ErrNotExist) +} diff --git a/libs/filer/fs_test.go b/libs/filer/fs_test.go index a74c10f0b..849cf6f7c 100644 --- a/libs/filer/fs_test.go +++ b/libs/filer/fs_test.go @@ -6,6 +6,7 @@ import ( "io/fs" "testing" + "github.com/databricks/cli/libs/fakefs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -35,7 +36,7 @@ func TestFsDirImplementsFsReadDirFile(t *testing.T) { } func fakeFS() fs.FS { - fakeFiler := NewFakeFiler(map[string]FakeFileInfo{ + fakeFiler := NewFakeFiler(map[string]fakefs.FileInfo{ ".": {FakeName: "root", FakeDir: true}, "dirA": {FakeDir: true}, "dirB": {FakeDir: true}, diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index ad89d6dd5..786c7e394 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/libs/fakefs" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -100,11 +101,21 @@ func TestDetectFileWithLongHeader(t *testing.T) { assert.False(t, nb) } +type fileInfoWithWorkspaceInfo struct { + fakefs.FileInfo + + oi workspace.ObjectInfo +} + +func (f fileInfoWithWorkspaceInfo) WorkspaceObjectInfo() workspace.ObjectInfo { + return f.oi +} + func TestDetectWithObjectInfo(t *testing.T) { - fakeFS := &fakeFS{ - fakeFile{ - fakeFileInfo{ - workspace.ObjectInfo{ + fakefs := fakefs.FS{ + "file.py": fakefs.File{ + FileInfo: fileInfoWithWorkspaceInfo{ + oi: workspace.ObjectInfo{ ObjectType: workspace.ObjectTypeNotebook, Language: workspace.LanguagePython, }, @@ -112,7 +123,7 @@ func TestDetectWithObjectInfo(t *testing.T) { }, } - nb, lang, err := DetectWithFS(fakeFS, "doesntmatter") + nb, lang, err := DetectWithFS(fakefs, "file.py") require.NoError(t, err) assert.True(t, nb) assert.Equal(t, workspace.LanguagePython, lang) diff --git a/libs/notebook/fakefs_test.go b/libs/notebook/fakefs_test.go deleted file mode 100644 index 4ac135dd4..000000000 --- a/libs/notebook/fakefs_test.go +++ /dev/null @@ -1,77 +0,0 @@ -package notebook - -import ( - "fmt" - "io/fs" - "time" - - "github.com/databricks/databricks-sdk-go/service/workspace" -) - -type fakeFS struct { - fakeFile -} - -type fakeFile struct { - fakeFileInfo -} - -func (f fakeFile) Close() error { - return nil -} - -func (f fakeFile) Read(p []byte) (n int, err error) { - return 0, fmt.Errorf("not implemented") -} - -func (f fakeFile) Stat() (fs.FileInfo, error) { - return f.fakeFileInfo, nil -} - -type fakeFileInfo struct { - oi workspace.ObjectInfo -} - -func (f fakeFileInfo) WorkspaceObjectInfo() workspace.ObjectInfo { - return f.oi -} - -func (f fakeFileInfo) Name() string { - return "" -} - -func (f fakeFileInfo) Size() int64 { - return 0 -} - -func (f fakeFileInfo) Mode() fs.FileMode { - return 0 -} - -func (f fakeFileInfo) ModTime() time.Time { - return time.Time{} -} - -func 
(f fakeFileInfo) IsDir() bool { - return false -} - -func (f fakeFileInfo) Sys() any { - return nil -} - -func (f fakeFS) Open(name string) (fs.File, error) { - return f.fakeFile, nil -} - -func (f fakeFS) Stat(name string) (fs.FileInfo, error) { - panic("not implemented") -} - -func (f fakeFS) ReadDir(name string) ([]fs.DirEntry, error) { - panic("not implemented") -} - -func (f fakeFS) ReadFile(name string) ([]byte, error) { - panic("not implemented") -} From 72dde793d88801aa4706d31d5b01028e92eddf60 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 18 Nov 2024 22:55:24 +0530 Subject: [PATCH 19/42] Fix workspace extensions filer accidentally reading notebooks (#1891) ## Changes The workspace extensions filer should not read or stat a notebook called `foo` if the user calls `.Stat(ctx, "foo")`. Instead, the filer should return a file not found error. This is because the contract for the workspace extensions filer is to only work for notebooks when the file path / name includes the extension (example: `foo.ipynb` or `foo.sql` instead of just `foo`) ## Tests Integration tests. --- internal/filer_test.go | 57 +++++++++++++++++++ .../workspace_files_extensions_client.go | 43 +++++++++++++- 2 files changed, 98 insertions(+), 2 deletions(-) diff --git a/internal/filer_test.go b/internal/filer_test.go index 20207d343..a2760d911 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -723,6 +723,63 @@ func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } +func TestAccWorkspaceFilesExtensionsNotebooksAreNotReadAsFiles(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Create a notebook + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + require.NoError(t, err) + + // Reading foo should fail. Even though the WSFS name for the notebook is foo + // reading the notebook should only work with the .ipynb extension. + _, err = wf.Read(ctx, "foo") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = wf.Read(ctx, "foo.ipynb") + assert.NoError(t, err) +} + +func TestAccWorkspaceFilesExtensionsNotebooksAreNotStatAsFiles(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Create a notebook + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + require.NoError(t, err) + + // Stating foo should fail. Even though the WSFS name for the notebook is foo + // stating the notebook should only work with the .ipynb extension. + _, err = wf.Stat(ctx, "foo") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = wf.Stat(ctx, "foo.ipynb") + assert.NoError(t, err) +} + +func TestAccWorkspaceFilesExtensionsNotebooksAreNotDeletedAsFiles(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Create a notebook + err := wf.Write(ctx, "foo.ipynb", strings.NewReader(readFile(t, "testdata/notebooks/py1.ipynb"))) + require.NoError(t, err) + + // Deleting foo should fail. Even though the WSFS name for the notebook is foo + // deleting the notebook should only work with the .ipynb extension. 
+ err = wf.Delete(ctx, "foo") + assert.ErrorIs(t, err, fs.ErrNotExist) + + err = wf.Delete(ctx, "foo.ipynb") + assert.NoError(t, err) +} + func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { t.Parallel() diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 53b77dd5b..2a6052091 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -244,6 +244,17 @@ func (w *workspaceFilesExtensionsClient) Write(ctx context.Context, name string, // Try to read the file as a regular file. If the file is not found, try to read it as a notebook. func (w *workspaceFilesExtensionsClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { + // Ensure that the file / notebook exists. We do this check here to avoid reading + // the content of a notebook called `foo` when the user actually wanted + // to read the content of a file called `foo`. + // + // To read the content of a notebook called `foo` in the workspace the user + // should use the name with the extension included like `foo.ipynb` or `foo.sql`. + _, err := w.Stat(ctx, name) + if err != nil { + return nil, err + } + r, err := w.wsfs.Read(ctx, name) // If the file is not found, it might be a notebook. @@ -276,7 +287,18 @@ func (w *workspaceFilesExtensionsClient) Delete(ctx context.Context, name string, return ReadOnlyError{"delete"} } - err := w.wsfs.Delete(ctx, name, mode...) + // Ensure that the file / notebook exists. We do this check here to avoid + // deleting a notebook called `foo` when the user actually wanted to + // delete a file called `foo`. + // + // To delete a notebook called `foo` in the workspace the user should use the + // name with the extension included like `foo.ipynb` or `foo.sql`. + _, err := w.Stat(ctx, name) + if err != nil { + return err + } + + err = w.wsfs.Delete(ctx, name, mode...) // If the file is not found, it might be a notebook. if errors.As(err, &FileDoesNotExistError{}) { @@ -315,7 +337,24 @@ func (w *workspaceFilesExtensionsClient) Stat(ctx context.Context, name string) return wsfsFileInfo{ObjectInfo: stat.ObjectInfo}, nil } - return info, err + if err != nil { + return nil, err + } + + // If an object is found and it is a notebook, return a FileDoesNotExistError. + // If a notebook is found by the workspace files client, without having stripped + // the extension, this implies that no file with the same name exists. + // + // This check is done to avoid returning the stat for a notebook called `foo` + // when the user actually wanted to stat a file called `foo`. + // + // To stat the metadata of a notebook called `foo` in the workspace the user + // should use the name with the extension included like `foo.ipynb` or `foo.sql`. 
+ if info.Sys().(workspace.ObjectInfo).ObjectType == workspace.ObjectTypeNotebook { + return nil, FileDoesNotExistError{name} + } + + return info, nil } // Note: The import API returns opaque internal errors for namespace clashes From 4fea0219fddee863c20af68da1d5965412d35a2e Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 20 Nov 2024 10:28:35 +0100 Subject: [PATCH 20/42] Use `fs.FS` interface to read template (#1910) ## Changes While working on the v2 of #1744, I found that: * Template initialization first copies built-in templates to a temporary directory before initializing them * Reading a template's contents goes through a `filer.Filer` but is hardcoded to a local one This change updates the interface for reading templates to be `fs.FS`. This is compatible with the `embed.FS` type for the built-in templates, so they no longer have to be copied to a temporary directory before being used. The alternative is to use a `filer.Filer` throughout, but this would have required even more plumbing, and we don't need to _read_ templates, including notebooks, from the workspace filesystem (yet?). As part of making `template.Materialize` take an `fs.FS` argument, the logic to match a given argument to a particular built-in template in the `init` command has moved to sit next to its implementation. ## Tests Existing tests pass. --- cmd/bundle/init.go | 35 +++++++++++++- internal/bundle/helpers.go | 2 +- libs/jsonschema/schema.go | 9 +++- libs/jsonschema/schema_test.go | 7 +++ libs/template/builtin.go | 47 +++++++++++++++++++ libs/template/builtin_test.go | 28 +++++++++++ libs/template/config.go | 6 +-- libs/template/config_test.go | 29 ++++++------ libs/template/file.go | 21 +++++++-- libs/template/file_test.go | 12 ++--- libs/template/helpers_test.go | 16 +++---- libs/template/materialize.go | 77 +++---------------------------- libs/template/materialize_test.go | 6 +-- libs/template/renderer.go | 58 +++++++++++++---------- libs/template/renderer_test.go | 58 ++++++++--------------- 15 files changed, 232 insertions(+), 179 deletions(-) create mode 100644 libs/template/builtin.go create mode 100644 libs/template/builtin_test.go diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 7f2c0efc5..d31a702a1 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -3,6 +3,7 @@ package bundle import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "slices" @@ -109,6 +110,24 @@ func getUrlForNativeTemplate(name string) string { return "" } +func getFsForNativeTemplate(name string) (fs.FS, error) { + builtin, err := template.Builtin() + if err != nil { + return nil, err + } + + // If this is a built-in template, the return value will be non-nil. + var templateFS fs.FS + for _, entry := range builtin { + if entry.Name == name { + templateFS = entry.FS + break + } + } + + return templateFS, nil +} + func isRepoUrl(url string) bool { result := false for _, prefix := range gitUrlPrefixes { @@ -198,9 +217,20 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf if templateDir != "" { return errors.New("--template-dir can only be used with a Git repository URL") } + + templateFS, err := getFsForNativeTemplate(templatePath) + if err != nil { + return err + } + + // If this is not a built-in template, then it must be a local file system path. + if templateFS == nil { + templateFS = os.DirFS(templatePath) + } + // skip downloading the repo because input arg is not a URL. 
We assume // it's a path on the local file system in that case - return template.Materialize(ctx, configFile, templatePath, outputDir) + return template.Materialize(ctx, configFile, templateFS, outputDir) } // Create a temporary directory with the name of the repository. The '*' @@ -224,7 +254,8 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf // Clean up downloaded repository once the template is materialized. defer os.RemoveAll(repoDir) - return template.Materialize(ctx, configFile, filepath.Join(repoDir, templateDir), outputDir) + templateFS := os.DirFS(filepath.Join(repoDir, templateDir)) + return template.Materialize(ctx, configFile, templateFS, outputDir) } return cmd } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 8f1a866f6..9740061ec 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -42,7 +42,7 @@ func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateN cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) - err = template.Materialize(ctx, configFilePath, templateRoot, bundleRoot) + err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), bundleRoot) return bundleRoot, err } diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 7690ec2f7..b9c3fb08c 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -3,7 +3,9 @@ package jsonschema import ( "encoding/json" "fmt" + "io/fs" "os" + "path/filepath" "regexp" "slices" @@ -255,7 +257,12 @@ func (schema *Schema) validate() error { } func Load(path string) (*Schema, error) { - b, err := os.ReadFile(path) + dir, file := filepath.Split(path) + return LoadFS(os.DirFS(dir), file) +} + +func LoadFS(fsys fs.FS, path string) (*Schema, error) { + b, err := fs.ReadFile(fsys, path) if err != nil { return nil, err } diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index cf1f12767..d66868bb2 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -1,6 +1,7 @@ package jsonschema import ( + "os" "testing" "github.com/stretchr/testify/assert" @@ -305,3 +306,9 @@ func TestValidateSchemaSkippedPropertiesHaveDefaults(t *testing.T) { err = s.validate() assert.NoError(t, err) } + +func TestSchema_LoadFS(t *testing.T) { + fsys := os.DirFS("./testdata/schema-load-int") + _, err := LoadFS(fsys, "schema-valid.json") + assert.NoError(t, err) +} diff --git a/libs/template/builtin.go b/libs/template/builtin.go new file mode 100644 index 000000000..dcb3a8858 --- /dev/null +++ b/libs/template/builtin.go @@ -0,0 +1,47 @@ +package template + +import ( + "embed" + "io/fs" +) + +//go:embed all:templates +var builtinTemplates embed.FS + +// BuiltinTemplate represents a template that is built into the CLI. +type BuiltinTemplate struct { + Name string + FS fs.FS +} + +// Builtin returns the list of all built-in templates. 
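+// Each entry pairs a template's name (its directory name under the embedded
+// templates/ tree) with an [fs.FS] rooted at that template's directory.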
+func Builtin() ([]BuiltinTemplate, error) { + templates, err := fs.Sub(builtinTemplates, "templates") + if err != nil { + return nil, err + } + + entries, err := fs.ReadDir(templates, ".") + if err != nil { + return nil, err + } + + var out []BuiltinTemplate + for _, entry := range entries { + if !entry.IsDir() { + continue + } + + templateFS, err := fs.Sub(templates, entry.Name()) + if err != nil { + return nil, err + } + + out = append(out, BuiltinTemplate{ + Name: entry.Name(), + FS: templateFS, + }) + } + + return out, nil +} diff --git a/libs/template/builtin_test.go b/libs/template/builtin_test.go new file mode 100644 index 000000000..504e0acca --- /dev/null +++ b/libs/template/builtin_test.go @@ -0,0 +1,28 @@ +package template + +import ( + "io/fs" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestBuiltin(t *testing.T) { + out, err := Builtin() + require.NoError(t, err) + assert.Len(t, out, 3) + + // Confirm names. + assert.Equal(t, "dbt-sql", out[0].Name) + assert.Equal(t, "default-python", out[1].Name) + assert.Equal(t, "default-sql", out[2].Name) + + // Confirm that the filesystems work. + _, err = fs.Stat(out[0].FS, `template/{{.project_name}}/dbt_project.yml.tmpl`) + assert.NoError(t, err) + _, err = fs.Stat(out[1].FS, `template/{{.project_name}}/tests/main_test.py.tmpl`) + assert.NoError(t, err) + _, err = fs.Stat(out[2].FS, `template/{{.project_name}}/src/orders_daily.sql.tmpl`) + assert.NoError(t, err) +} diff --git a/libs/template/config.go b/libs/template/config.go index 5470aefeb..8e7695b91 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "io/fs" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/jsonschema" @@ -28,9 +29,8 @@ type config struct { schema *jsonschema.Schema } -func newConfig(ctx context.Context, schemaPath string) (*config, error) { - // Read config schema - schema, err := jsonschema.Load(schemaPath) +func newConfig(ctx context.Context, templateFS fs.FS, schemaPath string) (*config, error) { + schema, err := jsonschema.LoadFS(templateFS, schemaPath) if err != nil { return nil, err } diff --git a/libs/template/config_test.go b/libs/template/config_test.go index ab9dbeb5f..49d3423e2 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -3,6 +3,8 @@ package template import ( "context" "fmt" + "os" + "path" "path/filepath" "testing" "text/template" @@ -16,7 +18,7 @@ func TestTemplateConfigAssignValuesFromFile(t *testing.T) { testDir := "./testdata/config-assign-from-file" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) @@ -32,7 +34,7 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te testDir := "./testdata/config-assign-from-file" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) c.values = map[string]any{ @@ -52,7 +54,7 @@ func TestTemplateConfigAssignValuesFromFileForInvalidIntegerValue(t *testing.T) testDir := "./testdata/config-assign-from-file-invalid-int" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, 
err) err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) @@ -63,7 +65,7 @@ func TestTemplateConfigAssignValuesFromFileFiltersPropertiesNotInTheSchema(t *te testDir := "./testdata/config-assign-from-file-unknown-property" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) @@ -78,10 +80,10 @@ func TestTemplateConfigAssignValuesFromDefaultValues(t *testing.T) { testDir := "./testdata/config-assign-from-default-value" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) - r, err := newRenderer(ctx, nil, nil, "./testdata/empty/template", "./testdata/empty/library", t.TempDir()) + r, err := newRenderer(ctx, nil, nil, os.DirFS("."), "./testdata/empty/template", "./testdata/empty/library", t.TempDir()) require.NoError(t, err) err = c.assignDefaultValues(r) @@ -97,10 +99,10 @@ func TestTemplateConfigAssignValuesFromTemplatedDefaultValues(t *testing.T) { testDir := "./testdata/config-assign-from-templated-default-value" ctx := context.Background() - c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) - r, err := newRenderer(ctx, nil, nil, filepath.Join(testDir, "template/template"), filepath.Join(testDir, "template/library"), t.TempDir()) + r, err := newRenderer(ctx, nil, nil, os.DirFS("."), path.Join(testDir, "template/template"), path.Join(testDir, "template/library"), t.TempDir()) require.NoError(t, err) // Note: only the string value is templated. 
@@ -116,7 +118,7 @@ func TestTemplateConfigAssignValuesFromTemplatedDefaultValues(t *testing.T) { func TestTemplateConfigValidateValuesDefined(t *testing.T) { ctx := context.Background() - c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + c, err := newConfig(ctx, os.DirFS("testdata/config-test-schema"), "test-schema.json") require.NoError(t, err) c.values = map[string]any{ @@ -131,7 +133,7 @@ func TestTemplateConfigValidateValuesDefined(t *testing.T) { func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { ctx := context.Background() - c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + c, err := newConfig(ctx, os.DirFS("testdata/config-test-schema"), "test-schema.json") require.NoError(t, err) c.values = map[string]any{ @@ -147,7 +149,7 @@ func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { ctx := context.Background() - c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + c, err := newConfig(ctx, os.DirFS("testdata/config-test-schema"), "test-schema.json") require.NoError(t, err) c.values = map[string]any{ @@ -164,7 +166,7 @@ func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { ctx := context.Background() - c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + c, err := newConfig(ctx, os.DirFS("testdata/config-test-schema"), "test-schema.json") require.NoError(t, err) c.values = map[string]any{ @@ -271,7 +273,8 @@ func TestTemplateEnumValidation(t *testing.T) { } func TestTemplateSchemaErrorsWithEmptyDescription(t *testing.T) { - _, err := newConfig(context.Background(), "./testdata/config-test-schema/invalid-test-schema.json") + ctx := context.Background() + _, err := newConfig(ctx, os.DirFS("./testdata/config-test-schema"), "invalid-test-schema.json") assert.EqualError(t, err, "template property property-without-description is missing a description") } diff --git a/libs/template/file.go b/libs/template/file.go index aafb1acfa..5492ebeb4 100644 --- a/libs/template/file.go +++ b/libs/template/file.go @@ -6,8 +6,7 @@ import ( "io/fs" "os" "path/filepath" - - "github.com/databricks/cli/libs/filer" + "slices" ) // Interface representing a file to be materialized from a template into a project @@ -19,6 +18,10 @@ type file interface { // Write file to disk at the destination path. PersistToDisk() error + + // contents returns the file contents as a byte slice. + // This is used for testing purposes. + contents() ([]byte, error) } type destinationPath struct { @@ -46,8 +49,8 @@ type copyFile struct { dstPath *destinationPath - // Filer rooted at template root. Used to read srcPath. - srcFiler filer.Filer + // [fs.FS] rooted at template root. Used to read srcPath. + srcFS fs.FS // Relative path from template root for file to be copied. 
srcPath string @@ -63,7 +66,7 @@ func (f *copyFile) PersistToDisk() error { if err != nil { return err } - srcFile, err := f.srcFiler.Read(f.ctx, f.srcPath) + srcFile, err := f.srcFS.Open(f.srcPath) if err != nil { return err } @@ -77,6 +80,10 @@ func (f *copyFile) PersistToDisk() error { return err } +func (f *copyFile) contents() ([]byte, error) { + return fs.ReadFile(f.srcFS, f.srcPath) +} + type inMemoryFile struct { dstPath *destinationPath @@ -99,3 +106,7 @@ func (f *inMemoryFile) PersistToDisk() error { } return os.WriteFile(path, f.content, f.perm) } + +func (f *inMemoryFile) contents() ([]byte, error) { + return slices.Clone(f.content), nil +} diff --git a/libs/template/file_test.go b/libs/template/file_test.go index 85938895e..e1bd54564 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -8,7 +8,6 @@ import ( "runtime" "testing" - "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -33,10 +32,7 @@ func testInMemoryFile(t *testing.T, perm fs.FileMode) { func testCopyFile(t *testing.T, perm fs.FileMode) { tmpDir := t.TempDir() - - templateFiler, err := filer.NewLocalClient(tmpDir) - require.NoError(t, err) - err = os.WriteFile(filepath.Join(tmpDir, "source"), []byte("qwerty"), perm) + err := os.WriteFile(filepath.Join(tmpDir, "source"), []byte("qwerty"), perm) require.NoError(t, err) f := ©File{ @@ -45,9 +41,9 @@ func testCopyFile(t *testing.T, perm fs.FileMode) { root: tmpDir, relPath: "a/b/c", }, - perm: perm, - srcPath: "source", - srcFiler: templateFiler, + perm: perm, + srcPath: "source", + srcFS: os.DirFS(tmpDir), } err = f.PersistToDisk() assert.NoError(t, err) diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 8cc7b928e..8a779eccb 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -22,7 +22,7 @@ func TestTemplatePrintStringWithoutProcessing(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/print-without-processing/template", "./testdata/print-without-processing/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/print-without-processing/template", "./testdata/print-without-processing/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -39,7 +39,7 @@ func TestTemplateRegexpCompileFunction(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/regexp-compile/template", "./testdata/regexp-compile/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/regexp-compile/template", "./testdata/regexp-compile/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -57,7 +57,7 @@ func TestTemplateRandIntFunction(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/random-int/template", "./testdata/random-int/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/random-int/template", "./testdata/random-int/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -75,7 +75,7 @@ func TestTemplateUuidFunction(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/uuid/template", "./testdata/uuid/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), 
"./testdata/uuid/template", "./testdata/uuid/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -92,7 +92,7 @@ func TestTemplateUrlFunction(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/urlparse-function/template", "./testdata/urlparse-function/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/urlparse-function/template", "./testdata/urlparse-function/library", tmpDir) require.NoError(t, err) @@ -109,7 +109,7 @@ func TestTemplateMapPairFunction(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) require.NoError(t, err) @@ -132,7 +132,7 @@ func TestWorkspaceHost(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) require.NoError(t, err) @@ -157,7 +157,7 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) assert.NoError(t, err) diff --git a/libs/template/materialize.go b/libs/template/materialize.go index d824bf381..0163eb7d2 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -2,13 +2,9 @@ package template import ( "context" - "embed" "errors" "fmt" "io/fs" - "os" - "path" - "path/filepath" "github.com/databricks/cli/libs/cmdio" ) @@ -17,39 +13,20 @@ const libraryDirName = "library" const templateDirName = "template" const schemaFileName = "databricks_template_schema.json" -//go:embed all:templates -var builtinTemplates embed.FS - // This function materializes the input templates as a project, using user defined // configurations. // Parameters: // // ctx: context containing a cmdio object. 
This is used to prompt the user // configFilePath: file path containing user defined config values -// templateRoot: root of the template definition +// templateFS: root of the template definition // outputDir: root of directory where to initialize the template -func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir string) error { - // Use a temporary directory in case any builtin templates like default-python are used - tempDir, err := os.MkdirTemp("", "templates") - defer os.RemoveAll(tempDir) - if err != nil { - return err - } - templateRoot, err = prepareBuiltinTemplates(templateRoot, tempDir) - if err != nil { - return err +func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, outputDir string) error { + if _, err := fs.Stat(templateFS, schemaFileName); errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaFileName) } - templatePath := filepath.Join(templateRoot, templateDirName) - libraryPath := filepath.Join(templateRoot, libraryDirName) - schemaPath := filepath.Join(templateRoot, schemaFileName) - helpers := loadHelpers(ctx) - - if _, err := os.Stat(schemaPath); errors.Is(err, fs.ErrNotExist) { - return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaPath) - } - - config, err := newConfig(ctx, schemaPath) + config, err := newConfig(ctx, templateFS, schemaFileName) if err != nil { return err } @@ -62,7 +39,8 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st } } - r, err := newRenderer(ctx, config.values, helpers, templatePath, libraryPath, outputDir) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, config.values, helpers, templateFS, templateDirName, libraryDirName, outputDir) if err != nil { return err } @@ -111,44 +89,3 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st } return nil } - -// If the given templateRoot matches -func prepareBuiltinTemplates(templateRoot string, tempDir string) (string, error) { - // Check that `templateRoot` is a clean basename, i.e. `some_path` and not `./some_path` or "." - // Return early if that's not the case. - if templateRoot == "." || path.Base(templateRoot) != templateRoot { - return templateRoot, nil - } - - _, err := fs.Stat(builtinTemplates, path.Join("templates", templateRoot)) - if err != nil { - // The given path doesn't appear to be using out built-in templates - return templateRoot, nil - } - - // We have a built-in template with the same name as templateRoot! - // Now we need to make a fully copy of the builtin templates to a real file system - // since template.Parse() doesn't support embed.FS. 
- err = fs.WalkDir(builtinTemplates, "templates", func(path string, entry fs.DirEntry, err error) error { - if err != nil { - return err - } - - targetPath := filepath.Join(tempDir, path) - if entry.IsDir() { - return os.Mkdir(targetPath, 0755) - } else { - content, err := fs.ReadFile(builtinTemplates, path) - if err != nil { - return err - } - return os.WriteFile(targetPath, content, 0644) - } - }) - - if err != nil { - return "", err - } - - return filepath.Join(tempDir, "templates", templateRoot), nil -} diff --git a/libs/template/materialize_test.go b/libs/template/materialize_test.go index b4be3fe98..dc510a30d 100644 --- a/libs/template/materialize_test.go +++ b/libs/template/materialize_test.go @@ -3,7 +3,7 @@ package template import ( "context" "fmt" - "path/filepath" + "os" "testing" "github.com/databricks/cli/cmd/root" @@ -19,6 +19,6 @@ func TestMaterializeForNonTemplateDirectory(t *testing.T) { ctx := root.SetWorkspaceClient(context.Background(), w) // Try to materialize a non-template directory. - err = Materialize(ctx, "", tmpDir, "") - assert.EqualError(t, err, fmt.Sprintf("not a bundle template: expected to find a template schema file at %s", filepath.Join(tmpDir, schemaFileName))) + err = Materialize(ctx, "", os.DirFS(tmpDir), "") + assert.EqualError(t, err, fmt.Sprintf("not a bundle template: expected to find a template schema file at %s", schemaFileName)) } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 827f30133..bc8650399 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -8,14 +8,12 @@ import ( "io/fs" "os" "path" - "path/filepath" "regexp" "slices" "sort" "strings" "text/template" - "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/logger" ) @@ -52,32 +50,42 @@ type renderer struct { // do not match any glob patterns from this list skipPatterns []string - // Filer rooted at template root. The file tree from this root is walked to - // generate the project - templateFiler filer.Filer + // [fs.FS] that holds the template's file tree. + srcFS fs.FS // Root directory for the project instantiated from the template instanceRoot string } -func newRenderer(ctx context.Context, config map[string]any, helpers template.FuncMap, templateRoot, libraryRoot, instanceRoot string) (*renderer, error) { +func newRenderer( + ctx context.Context, + config map[string]any, + helpers template.FuncMap, + templateFS fs.FS, + templateDir string, + libraryDir string, + instanceRoot string, +) (*renderer, error) { // Initialize new template, with helper functions loaded tmpl := template.New("").Funcs(helpers) - // Load user defined associated templates from the library root - libraryGlob := filepath.Join(libraryRoot, "*") - matches, err := filepath.Glob(libraryGlob) + // Find user-defined templates in the library directory + matches, err := fs.Glob(templateFS, path.Join(libraryDir, "*")) if err != nil { return nil, err } + + // Parse user-defined templates. + // Note: we do not call [ParseFS] with the glob directly because + // it returns an error if no files match the pattern. if len(matches) != 0 { - tmpl, err = tmpl.ParseFiles(matches...) + tmpl, err = tmpl.ParseFS(templateFS, matches...) 
if err != nil { return nil, err } } - templateFiler, err := filer.NewLocalClient(templateRoot) + srcFS, err := fs.Sub(templateFS, path.Clean(templateDir)) if err != nil { return nil, err } @@ -85,13 +93,13 @@ func newRenderer(ctx context.Context, config map[string]any, helpers template.Fu ctx = log.NewContext(ctx, log.GetLogger(ctx).With("action", "initialize-template")) return &renderer{ - ctx: ctx, - config: config, - baseTemplate: tmpl, - files: make([]file, 0), - skipPatterns: make([]string, 0), - templateFiler: templateFiler, - instanceRoot: instanceRoot, + ctx: ctx, + config: config, + baseTemplate: tmpl, + files: make([]file, 0), + skipPatterns: make([]string, 0), + srcFS: srcFS, + instanceRoot: instanceRoot, }, nil } @@ -141,7 +149,7 @@ func (r *renderer) executeTemplate(templateDefinition string) (string, error) { func (r *renderer) computeFile(relPathTemplate string) (file, error) { // read file permissions - info, err := r.templateFiler.Stat(r.ctx, relPathTemplate) + info, err := fs.Stat(r.srcFS, relPathTemplate) if err != nil { return nil, err } @@ -161,10 +169,10 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { root: r.instanceRoot, relPath: relPath, }, - perm: perm, - ctx: r.ctx, - srcPath: relPathTemplate, - srcFiler: r.templateFiler, + perm: perm, + ctx: r.ctx, + srcFS: r.srcFS, + srcPath: relPathTemplate, }, nil } else { // Trim the .tmpl suffix from file name, if specified in the template @@ -173,7 +181,7 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { } // read template file's content - templateReader, err := r.templateFiler.Read(r.ctx, relPathTemplate) + templateReader, err := r.srcFS.Open(relPathTemplate) if err != nil { return nil, err } @@ -263,7 +271,7 @@ func (r *renderer) walk() error { // // 2. 
For directories: They are appended to a slice, which acts as a queue // allowing BFS traversal of the template file tree - entries, err := r.templateFiler.ReadDir(r.ctx, currentDirectory) + entries, err := fs.ReadDir(r.srcFS, currentDirectory) if err != nil { return err } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 92133c5fe..9b8861e78 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -3,9 +3,9 @@ package template import ( "context" "fmt" - "io" "io/fs" "os" + "path" "path/filepath" "runtime" "strings" @@ -41,9 +41,8 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { ctx := context.Background() - templatePath, err := prepareBuiltinTemplates(template, tempDir) + templateFS, err := fs.Sub(builtinTemplates, path.Join("templates", template)) require.NoError(t, err) - libraryPath := filepath.Join(templatePath, "library") w := &databricks.WorkspaceClient{ Config: &workspaceConfig.Config{Host: "https://myhost.com"}, @@ -58,7 +57,7 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - renderer, err := newRenderer(ctx, settings, helpers, templatePath, libraryPath, tempDir) + renderer, err := newRenderer(ctx, settings, helpers, templateFS, templateDirName, libraryDirName, tempDir) require.NoError(t, err) // Evaluate template @@ -67,7 +66,7 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri err = renderer.persistToDisk() require.NoError(t, err) - b, err := bundle.Load(ctx, filepath.Join(tempDir, "template", "my_project")) + b, err := bundle.Load(ctx, filepath.Join(tempDir, "my_project")) require.NoError(t, err) diags := bundle.Apply(ctx, b, phases.LoadNamedTarget(target)) require.NoError(t, diags.Error()) @@ -96,18 +95,6 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri } } -func TestPrepareBuiltInTemplatesWithRelativePaths(t *testing.T) { - // CWD should not be resolved as a built in template - dir, err := prepareBuiltinTemplates(".", t.TempDir()) - assert.NoError(t, err) - assert.Equal(t, ".", dir) - - // relative path should not be resolved as a built in template - dir, err = prepareBuiltinTemplates("./default-python", t.TempDir()) - assert.NoError(t, err) - assert.Equal(t, "./default-python", dir) -} - func TestBuiltinPythonTemplateValid(t *testing.T) { // Test option combinations options := []string{"yes", "no"} @@ -194,7 +181,7 @@ func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/email/template", "./testdata/email/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/email/template", "./testdata/email/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -381,7 +368,7 @@ func TestRendererWalk(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/walk/template", "./testdata/walk/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/walk/template", "./testdata/walk/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -392,18 +379,9 @@ func TestRendererWalk(t *testing.T) { if 
f.DstPath().relPath != path { continue } - switch v := f.(type) { - case *inMemoryFile: - return strings.Trim(string(v.content), "\r\n") - case *copyFile: - r, err := r.templateFiler.Read(context.Background(), v.srcPath) - require.NoError(t, err) - b, err := io.ReadAll(r) - require.NoError(t, err) - return strings.Trim(string(b), "\r\n") - default: - require.FailNow(t, "execution should not reach here") - } + b, err := f.contents() + require.NoError(t, err) + return strings.Trim(string(b), "\r\n") } require.FailNow(t, "file is absent: "+path) return "" @@ -422,7 +400,7 @@ func TestRendererFailFunction(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/fail/template", "./testdata/fail/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/fail/template", "./testdata/fail/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -435,7 +413,7 @@ func TestRendererSkipsDirsEagerly(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -452,7 +430,7 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -475,7 +453,7 @@ func TestRendererSkipPatternsAreRelativeToFileDirectory(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -493,7 +471,7 @@ func TestRendererSkip(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/skip/template", "./testdata/skip/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip/template", "./testdata/skip/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -525,7 +503,7 @@ func TestRendererReadsPermissionsBits(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library", tmpDir) require.NoError(t, err) err = r.walk() @@ -615,7 +593,7 @@ func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library", tmpDir) 
require.NoError(t, err)

 	err = r.walk()
@@ -635,7 +613,7 @@ func TestRendererFileTreeRendering(t *testing.T) {
 	r, err := newRenderer(ctx, map[string]any{
 		"dir_name":  "my_directory",
 		"file_name": "my_file",
-	}, helpers, "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir)
+	}, helpers, os.DirFS("."), "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir)
 	require.NoError(t, err)

 	err = r.walk()
@@ -668,7 +646,7 @@ func TestRendererSubTemplateInPath(t *testing.T) {
 	testutil.Touch(t, filepath.Join(templateDir, "template/{{template `dir_name`}}/{{template `file_name`}}"))

 	tmpDir := t.TempDir()
-	r, err := newRenderer(ctx, nil, nil, filepath.Join(templateDir, "template"), filepath.Join(templateDir, "library"), tmpDir)
+	r, err := newRenderer(ctx, nil, nil, os.DirFS(templateDir), "template", "library", tmpDir)
 	require.NoError(t, err)

 	err = r.walk()

From 75b09ff230105eac4ff6a24881289729a8fea64e Mon Sep 17 00:00:00 2001
From: Pieter Noordhuis
Date: Wed, 20 Nov 2024 11:11:31 +0100
Subject: [PATCH 21/42] Use `filer.Filer` to write template instantiation (#1911)

## Changes

Prior to this change, the output directory was part of the `renderer` type and passed down to every `file` it produced. Every file knew its absolute destination path. This is incompatible with the use of a filer, where all operations are automatically anchored to some base path.

To make this compatible, this change updates:
* the `file` type to only know its own path relative to the instantiation root,
* the `renderer` type to no longer require or pass along the output directory,
* the `persistToDisk` function to take a context and filer argument,
* the `filer.WriteMode` to represent permission bits

## Tests

* Existing tests pass.
* Manually confirmed template initialization works as expected.
---
 libs/filer/filer.go            |  13 +++-
 libs/filer/filer_test.go       |  12 ++++
 libs/filer/local_client.go     |  13 +++-
 libs/template/config_test.go   |   4 +-
 libs/template/file.go          |  84 ++++++-----------------
 libs/template/file_test.go     |  62 +++++++------------
 libs/template/helpers_test.go  |  24 +++----
 libs/template/materialize.go   |  10 ++-
 libs/template/renderer.go      |  32 +++-------
 libs/template/renderer_test.go | 110 +++++++++++++++------------------
 10 files changed, 161 insertions(+), 203 deletions(-)
 create mode 100644 libs/filer/filer_test.go

diff --git a/libs/filer/filer.go b/libs/filer/filer.go
index fcfbcea07..b5be4c3c2 100644
--- a/libs/filer/filer.go
+++ b/libs/filer/filer.go
@@ -7,13 +7,24 @@ import (
 	"io/fs"
 )

+// WriteMode captures intent when writing a file.
+//
+// The first 9 bits are reserved for the [fs.FileMode] permission bits.
+// These are used only by the local filer implementation and have
+// no effect for the other implementations.
 type WriteMode int

+// writeModePerm is a mask to extract permission bits from a WriteMode.
+const writeModePerm = WriteMode(fs.ModePerm)
+
 const (
-	OverwriteIfExists WriteMode = 1 << iota
+	// Note: these constants are defined as powers of 2 to support combining them using a bit-wise OR.
+	// They start from the 10th bit (permission mask + 1) to avoid conflicts with the permission bits.
+	OverwriteIfExists WriteMode = (writeModePerm + 1) << iota
 	CreateParentDirectories
 )

+// DeleteMode captures intent when deleting a file.
type DeleteMode int const ( diff --git a/libs/filer/filer_test.go b/libs/filer/filer_test.go new file mode 100644 index 000000000..bacea730b --- /dev/null +++ b/libs/filer/filer_test.go @@ -0,0 +1,12 @@ +package filer + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestWriteMode(t *testing.T) { + assert.Equal(t, 512, int(OverwriteIfExists)) + assert.Equal(t, 1024, int(CreateParentDirectories)) +} diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 48e8a05ee..8b25345fc 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -28,6 +28,15 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, return err } + // Retrieve permission mask from the [WriteMode], if present. + perm := fs.FileMode(0644) + for _, m := range mode { + bits := m & writeModePerm + if bits != 0 { + perm = fs.FileMode(bits) + } + } + flags := os.O_WRONLY | os.O_CREATE if slices.Contains(mode, OverwriteIfExists) { flags |= os.O_TRUNC @@ -35,7 +44,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, flags |= os.O_EXCL } - f, err := os.OpenFile(absPath, flags, 0644) + f, err := os.OpenFile(absPath, flags, perm) if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. err = os.MkdirAll(filepath.Dir(absPath), 0755) @@ -43,7 +52,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, return err } // Try again. - f, err = os.OpenFile(absPath, flags, 0644) + f, err = os.OpenFile(absPath, flags, perm) } if err != nil { diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 49d3423e2..a855019b6 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -83,7 +83,7 @@ func TestTemplateConfigAssignValuesFromDefaultValues(t *testing.T) { c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) - r, err := newRenderer(ctx, nil, nil, os.DirFS("."), "./testdata/empty/template", "./testdata/empty/library", t.TempDir()) + r, err := newRenderer(ctx, nil, nil, os.DirFS("."), "./testdata/empty/template", "./testdata/empty/library") require.NoError(t, err) err = c.assignDefaultValues(r) @@ -102,7 +102,7 @@ func TestTemplateConfigAssignValuesFromTemplatedDefaultValues(t *testing.T) { c, err := newConfig(ctx, os.DirFS(testDir), "schema.json") require.NoError(t, err) - r, err := newRenderer(ctx, nil, nil, os.DirFS("."), path.Join(testDir, "template/template"), path.Join(testDir, "template/library"), t.TempDir()) + r, err := newRenderer(ctx, nil, nil, os.DirFS("."), path.Join(testDir, "template/template"), path.Join(testDir, "template/library")) require.NoError(t, err) // Note: only the string value is templated. diff --git a/libs/template/file.go b/libs/template/file.go index 5492ebeb4..36d079b3f 100644 --- a/libs/template/file.go +++ b/libs/template/file.go @@ -1,53 +1,36 @@ package template import ( + "bytes" "context" - "io" "io/fs" - "os" - "path/filepath" "slices" + + "github.com/databricks/cli/libs/filer" ) // Interface representing a file to be materialized from a template into a project // instance type file interface { - // Destination path for file. This is where the file will be created when - // PersistToDisk is called. - DstPath() *destinationPath + // Path of the file relative to the root of the instantiated template. + // This is where the file is written to when persisting the template to disk. + // Must be slash-separated. 
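+	// For example: "a/b/c", never "a\b\c" (even on Windows).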
+ RelPath() string // Write file to disk at the destination path. - PersistToDisk() error + Write(ctx context.Context, out filer.Filer) error // contents returns the file contents as a byte slice. // This is used for testing purposes. contents() ([]byte, error) } -type destinationPath struct { - // Root path for the project instance. This path uses the system's default - // file separator. For example /foo/bar on Unix and C:\foo\bar on windows - root string - - // Unix like file path relative to the "root" of the instantiated project. Is used to - // evaluate whether the file should be skipped by comparing it to a list of - // skip glob patterns. - relPath string -} - -// Absolute path of the file, in the os native format. For example /foo/bar on -// Unix and C:\foo\bar on windows -func (f *destinationPath) absPath() string { - return filepath.Join(f.root, filepath.FromSlash(f.relPath)) -} - type copyFile struct { - ctx context.Context - // Permissions bits for the destination file perm fs.FileMode - dstPath *destinationPath + // Destination path for the file. + relPath string // [fs.FS] rooted at template root. Used to read srcPath. srcFS fs.FS @@ -56,28 +39,17 @@ type copyFile struct { srcPath string } -func (f *copyFile) DstPath() *destinationPath { - return f.dstPath +func (f *copyFile) RelPath() string { + return f.relPath } -func (f *copyFile) PersistToDisk() error { - path := f.DstPath().absPath() - err := os.MkdirAll(filepath.Dir(path), 0755) +func (f *copyFile) Write(ctx context.Context, out filer.Filer) error { + src, err := f.srcFS.Open(f.srcPath) if err != nil { return err } - srcFile, err := f.srcFS.Open(f.srcPath) - if err != nil { - return err - } - defer srcFile.Close() - dstFile, err := os.OpenFile(path, os.O_CREATE|os.O_EXCL|os.O_WRONLY, f.perm) - if err != nil { - return err - } - defer dstFile.Close() - _, err = io.Copy(dstFile, srcFile) - return err + defer src.Close() + return out.Write(ctx, f.relPath, src, filer.CreateParentDirectories, filer.WriteMode(f.perm)) } func (f *copyFile) contents() ([]byte, error) { @@ -85,26 +57,22 @@ func (f *copyFile) contents() ([]byte, error) { } type inMemoryFile struct { - dstPath *destinationPath - - content []byte - // Permissions bits for the destination file perm fs.FileMode + + // Destination path for the file. + relPath string + + // Contents of the file. 
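+	// For rendered template files this holds the executed template output.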
+ content []byte } -func (f *inMemoryFile) DstPath() *destinationPath { - return f.dstPath +func (f *inMemoryFile) RelPath() string { + return f.relPath } -func (f *inMemoryFile) PersistToDisk() error { - path := f.DstPath().absPath() - - err := os.MkdirAll(filepath.Dir(path), 0755) - if err != nil { - return err - } - return os.WriteFile(path, f.content, f.perm) +func (f *inMemoryFile) Write(ctx context.Context, out filer.Filer) error { + return out.Write(ctx, f.relPath, bytes.NewReader(f.content), filer.CreateParentDirectories, filer.WriteMode(f.perm)) } func (f *inMemoryFile) contents() ([]byte, error) { diff --git a/libs/template/file_test.go b/libs/template/file_test.go index e1bd54564..bd5f6d632 100644 --- a/libs/template/file_test.go +++ b/libs/template/file_test.go @@ -8,77 +8,56 @@ import ( "runtime" "testing" + "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testInMemoryFile(t *testing.T, perm fs.FileMode) { +func testInMemoryFile(t *testing.T, ctx context.Context, perm fs.FileMode) { tmpDir := t.TempDir() f := &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a/b/c", - }, perm: perm, + relPath: "a/b/c", content: []byte("123"), } - err := f.PersistToDisk() + + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = f.Write(ctx, out) assert.NoError(t, err) assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "123") assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) } -func testCopyFile(t *testing.T, perm fs.FileMode) { +func testCopyFile(t *testing.T, ctx context.Context, perm fs.FileMode) { tmpDir := t.TempDir() err := os.WriteFile(filepath.Join(tmpDir, "source"), []byte("qwerty"), perm) require.NoError(t, err) f := ©File{ - ctx: context.Background(), - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a/b/c", - }, perm: perm, - srcPath: "source", + relPath: "a/b/c", srcFS: os.DirFS(tmpDir), + srcPath: "source", } - err = f.PersistToDisk() + + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = f.Write(ctx, out) assert.NoError(t, err) assertFileContent(t, filepath.Join(tmpDir, "a/b/c"), "qwerty") assertFilePermissions(t, filepath.Join(tmpDir, "a/b/c"), perm) } -func TestTemplateFileDestinationPath(t *testing.T) { - if runtime.GOOS == "windows" { - t.SkipNow() - } - f := &destinationPath{ - root: `a/b/c`, - relPath: "d/e", - } - assert.Equal(t, `a/b/c/d/e`, f.absPath()) -} - -func TestTemplateFileDestinationPathForWindows(t *testing.T) { - if runtime.GOOS != "windows" { - t.SkipNow() - } - f := &destinationPath{ - root: `c:\a\b\c`, - relPath: "d/e", - } - assert.Equal(t, `c:\a\b\c\d\e`, f.absPath()) -} - func TestTemplateInMemoryFilePersistToDisk(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } - testInMemoryFile(t, 0755) + ctx := context.Background() + testInMemoryFile(t, ctx, 0755) } func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { @@ -87,14 +66,16 @@ func TestTemplateInMemoryFilePersistToDiskForWindows(t *testing.T) { } // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. 
- testInMemoryFile(t, 0666) + ctx := context.Background() + testInMemoryFile(t, ctx, 0666) } func TestTemplateCopyFilePersistToDisk(t *testing.T) { if runtime.GOOS == "windows" { t.SkipNow() } - testCopyFile(t, 0644) + ctx := context.Background() + testCopyFile(t, ctx, 0644) } func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { @@ -103,5 +84,6 @@ func TestTemplateCopyFilePersistToDiskForWindows(t *testing.T) { } // we have separate tests for windows because of differences in valid // fs.FileMode values we can use for different operating systems. - testCopyFile(t, 0666) + ctx := context.Background() + testCopyFile(t, ctx, 0666) } diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index 8a779eccb..9f5804c03 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -18,11 +18,10 @@ import ( func TestTemplatePrintStringWithoutProcessing(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/print-without-processing/template", "./testdata/print-without-processing/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/print-without-processing/template", "./testdata/print-without-processing/library") require.NoError(t, err) err = r.walk() @@ -35,11 +34,10 @@ func TestTemplatePrintStringWithoutProcessing(t *testing.T) { func TestTemplateRegexpCompileFunction(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/regexp-compile/template", "./testdata/regexp-compile/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/regexp-compile/template", "./testdata/regexp-compile/library") require.NoError(t, err) err = r.walk() @@ -53,11 +51,10 @@ func TestTemplateRegexpCompileFunction(t *testing.T) { func TestTemplateRandIntFunction(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/random-int/template", "./testdata/random-int/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/random-int/template", "./testdata/random-int/library") require.NoError(t, err) err = r.walk() @@ -71,11 +68,10 @@ func TestTemplateRandIntFunction(t *testing.T) { func TestTemplateUuidFunction(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/uuid/template", "./testdata/uuid/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/uuid/template", "./testdata/uuid/library") require.NoError(t, err) err = r.walk() @@ -88,11 +84,10 @@ func TestTemplateUuidFunction(t *testing.T) { func TestTemplateUrlFunction(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/urlparse-function/template", "./testdata/urlparse-function/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/urlparse-function/template", "./testdata/urlparse-function/library") require.NoError(t, err) 
@@ -105,11 +100,10 @@ func TestTemplateUrlFunction(t *testing.T) { func TestTemplateMapPairFunction(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/map-pair/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/map-pair/template", "./testdata/map-pair/library") require.NoError(t, err) @@ -122,7 +116,6 @@ func TestTemplateMapPairFunction(t *testing.T) { func TestWorkspaceHost(t *testing.T) { ctx := context.Background() - tmpDir := t.TempDir() w := &databricks.WorkspaceClient{ Config: &workspaceConfig.Config{ @@ -132,7 +125,7 @@ func TestWorkspaceHost(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library") require.NoError(t, err) @@ -149,7 +142,6 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) - tmpDir := t.TempDir() w := &databricks.WorkspaceClient{ Config: &workspaceConfig.Config{}, @@ -157,7 +149,7 @@ func TestWorkspaceHostNotConfigured(t *testing.T) { ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/workspace-host/template", "./testdata/map-pair/library") assert.NoError(t, err) diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 0163eb7d2..8338e119e 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -7,6 +7,7 @@ import ( "io/fs" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/filer" ) const libraryDirName = "library" @@ -40,7 +41,7 @@ func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, o } helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, config.values, helpers, templateFS, templateDirName, libraryDirName, outputDir) + r, err := newRenderer(ctx, config.values, helpers, templateFS, templateDirName, libraryDirName) if err != nil { return err } @@ -72,7 +73,12 @@ func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, o return err } - err = r.persistToDisk() + out, err := filer.NewLocalClient(outputDir) + if err != nil { + return err + } + + err = r.persistToDisk(ctx, out) if err != nil { return err } diff --git a/libs/template/renderer.go b/libs/template/renderer.go index bc8650399..0f30a67d0 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/fs" - "os" "path" "regexp" "slices" @@ -14,6 +13,7 @@ import ( "strings" "text/template" + "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/logger" ) @@ -52,9 +52,6 @@ type renderer struct { // [fs.FS] that holds the template's file tree. 
srcFS fs.FS - - // Root directory for the project instantiated from the template - instanceRoot string } func newRenderer( @@ -64,7 +61,6 @@ func newRenderer( templateFS fs.FS, templateDir string, libraryDir string, - instanceRoot string, ) (*renderer, error) { // Initialize new template, with helper functions loaded tmpl := template.New("").Funcs(helpers) @@ -99,7 +95,6 @@ func newRenderer( files: make([]file, 0), skipPatterns: make([]string, 0), srcFS: srcFS, - instanceRoot: instanceRoot, }, nil } @@ -165,12 +160,8 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { // over as is, without treating it as a template if !strings.HasSuffix(relPathTemplate, templateExtension) { return ©File{ - dstPath: &destinationPath{ - root: r.instanceRoot, - relPath: relPath, - }, perm: perm, - ctx: r.ctx, + relPath: relPath, srcFS: r.srcFS, srcPath: relPathTemplate, }, nil @@ -202,11 +193,8 @@ func (r *renderer) computeFile(relPathTemplate string) (file, error) { } return &inMemoryFile{ - dstPath: &destinationPath{ - root: r.instanceRoot, - relPath: relPath, - }, perm: perm, + relPath: relPath, content: []byte(content), }, nil } @@ -291,7 +279,7 @@ func (r *renderer) walk() error { if err != nil { return err } - logger.Infof(r.ctx, "added file to list of possible project files: %s", f.DstPath().relPath) + logger.Infof(r.ctx, "added file to list of possible project files: %s", f.RelPath()) r.files = append(r.files, f) } @@ -299,17 +287,17 @@ func (r *renderer) walk() error { return nil } -func (r *renderer) persistToDisk() error { +func (r *renderer) persistToDisk(ctx context.Context, out filer.Filer) error { // Accumulate files which we will persist, skipping files whose path matches // any of the skip patterns filesToPersist := make([]file, 0) for _, file := range r.files { - match, err := isSkipped(file.DstPath().relPath, r.skipPatterns) + match, err := isSkipped(file.RelPath(), r.skipPatterns) if err != nil { return err } if match { - log.Infof(r.ctx, "skipping file: %s", file.DstPath()) + log.Infof(r.ctx, "skipping file: %s", file.RelPath()) continue } filesToPersist = append(filesToPersist, file) @@ -317,8 +305,8 @@ func (r *renderer) persistToDisk() error { // Assert no conflicting files exist for _, file := range filesToPersist { - path := file.DstPath().absPath() - _, err := os.Stat(path) + path := file.RelPath() + _, err := out.Stat(ctx, path) if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } @@ -329,7 +317,7 @@ func (r *renderer) persistToDisk() error { // Persist files to disk for _, file := range filesToPersist { - err := file.PersistToDisk() + err := file.Write(ctx, out) if err != nil { return err } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 9b8861e78..a4b9166da 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -18,6 +18,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/tags" "github.com/databricks/databricks-sdk-go" workspaceConfig "github.com/databricks/databricks-sdk-go/config" @@ -57,13 +58,15 @@ func assertBuiltinTemplateValid(t *testing.T, template string, settings map[stri ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) - renderer, err := newRenderer(ctx, settings, helpers, templateFS, templateDirName, libraryDirName, tempDir) + renderer, err := 
newRenderer(ctx, settings, helpers, templateFS, templateDirName, libraryDirName) require.NoError(t, err) // Evaluate template err = renderer.walk() require.NoError(t, err) - err = renderer.persistToDisk() + out, err := filer.NewLocalClient(tempDir) + require.NoError(t, err) + err = renderer.persistToDisk(ctx, out) require.NoError(t, err) b, err := bundle.Load(ctx, filepath.Join(tempDir, "my_project")) @@ -181,13 +184,14 @@ func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/email/template", "./testdata/email/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/email/template", "./testdata/email/library") require.NoError(t, err) err = r.walk() require.NoError(t, err) - - err = r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) require.NoError(t, err) b, err := os.ReadFile(filepath.Join(tmpDir, "my_email")) @@ -312,45 +316,34 @@ func TestRendererPersistToDisk(t *testing.T) { r := &renderer{ ctx: ctx, - instanceRoot: tmpDir, skipPatterns: []string{"a/b/c", "mn*"}, files: []file{ &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a/b/c", - }, perm: 0444, + relPath: "a/b/c", content: nil, }, &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "mno", - }, perm: 0444, + relPath: "mno", content: nil, }, &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a/b/d", - }, perm: 0444, + relPath: "a/b/d", content: []byte("123"), }, &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "mmnn", - }, perm: 0444, + relPath: "mmnn", content: []byte("456"), }, }, } - err := r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) require.NoError(t, err) assert.NoFileExists(t, filepath.Join(tmpDir, "a", "b", "c")) @@ -365,10 +358,9 @@ func TestRendererPersistToDisk(t *testing.T) { func TestRendererWalk(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/walk/template", "./testdata/walk/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/walk/template", "./testdata/walk/library") require.NoError(t, err) err = r.walk() @@ -376,7 +368,7 @@ func TestRendererWalk(t *testing.T) { getContent := func(r *renderer, path string) string { for _, f := range r.files { - if f.DstPath().relPath != path { + if f.RelPath() != path { continue } b, err := f.contents() @@ -397,10 +389,9 @@ func TestRendererWalk(t *testing.T) { func TestRendererFailFunction(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/fail/template", "./testdata/fail/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/fail/template", "./testdata/fail/library") require.NoError(t, err) err = r.walk() @@ -410,10 +401,9 @@ func TestRendererFailFunction(t *testing.T) { func TestRendererSkipsDirsEagerly(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, 
helpers, os.DirFS("."), "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-dir-eagerly/template", "./testdata/skip-dir-eagerly/library") require.NoError(t, err) err = r.walk() @@ -430,7 +420,7 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-all-files-in-cwd/template", "./testdata/skip-all-files-in-cwd/library") require.NoError(t, err) err = r.walk() @@ -438,7 +428,9 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { // All 3 files are executed and have in memory representations require.Len(t, r.files, 3) - err = r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) require.NoError(t, err) entries, err := os.ReadDir(tmpDir) @@ -450,10 +442,9 @@ func TestRendererSkipAllFilesInCurrentDirectory(t *testing.T) { func TestRendererSkipPatternsAreRelativeToFileDirectory(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip-is-relative/template", "./testdata/skip-is-relative/library") require.NoError(t, err) err = r.walk() @@ -471,7 +462,7 @@ func TestRendererSkip(t *testing.T) { tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip/template", "./testdata/skip/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/skip/template", "./testdata/skip/library") require.NoError(t, err) err = r.walk() @@ -480,7 +471,9 @@ func TestRendererSkip(t *testing.T) { // This is because "dir2/*" matches the files in dir2, but not dir2 itself assert.Len(t, r.files, 6) - err = r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) require.NoError(t, err) assert.FileExists(t, filepath.Join(tmpDir, "file1")) @@ -498,12 +491,11 @@ func TestRendererReadsPermissionsBits(t *testing.T) { if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { t.SkipNow() } - tmpDir := t.TempDir() ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/executable-bit-read/template", "./testdata/executable-bit-read/library") require.NoError(t, err) err = r.walk() @@ -511,7 +503,7 @@ func TestRendererReadsPermissionsBits(t *testing.T) { getPermissions := func(r *renderer, path string) fs.FileMode { for _, f := range r.files { - if f.DstPath().relPath != path { + if f.RelPath() != path { continue } switch v := f.(type) { @@ -534,6 +526,7 @@ func TestRendererReadsPermissionsBits(t *testing.T) { func TestRendererErrorOnConflictingFile(t *testing.T) { tmpDir := t.TempDir() + ctx := context.Background() f, err := os.Create(filepath.Join(tmpDir, 
"a")) require.NoError(t, err) @@ -544,17 +537,16 @@ func TestRendererErrorOnConflictingFile(t *testing.T) { skipPatterns: []string{}, files: []file{ &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a", - }, perm: 0444, + relPath: "a", content: []byte("123"), }, }, } - err = r.persistToDisk() - assert.EqualError(t, err, fmt.Sprintf("failed to initialize template, one or more files already exist: %s", filepath.Join(tmpDir, "a"))) + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) + assert.EqualError(t, err, fmt.Sprintf("failed to initialize template, one or more files already exist: %s", "a")) } func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { @@ -571,16 +563,15 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { skipPatterns: []string{"a"}, files: []file{ &inMemoryFile{ - dstPath: &destinationPath{ - root: tmpDir, - relPath: "a", - }, perm: 0444, + relPath: "a", content: []byte("123"), }, }, } - err = r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) // No error is returned even though a conflicting file exists. This is because // the generated file is being skipped assert.NoError(t, err) @@ -590,10 +581,9 @@ func TestRendererNoErrorOnConflictingFileIfSkipped(t *testing.T) { func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library", tmpDir) + r, err := newRenderer(ctx, nil, helpers, os.DirFS("."), "./testdata/copy-file-walk/template", "./testdata/copy-file-walk/library") require.NoError(t, err) err = r.walk() @@ -601,7 +591,7 @@ func TestRendererNonTemplatesAreCreatedAsCopyFiles(t *testing.T) { assert.Len(t, r.files, 1) assert.Equal(t, r.files[0].(*copyFile).srcPath, "not-a-template") - assert.Equal(t, r.files[0].DstPath().absPath(), filepath.Join(tmpDir, "not-a-template")) + assert.Equal(t, r.files[0].RelPath(), "not-a-template") } func TestRendererFileTreeRendering(t *testing.T) { @@ -613,7 +603,7 @@ func TestRendererFileTreeRendering(t *testing.T) { r, err := newRenderer(ctx, map[string]any{ "dir_name": "my_directory", "file_name": "my_file", - }, helpers, os.DirFS("."), "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library", tmpDir) + }, helpers, os.DirFS("."), "./testdata/file-tree-rendering/template", "./testdata/file-tree-rendering/library") require.NoError(t, err) err = r.walk() @@ -621,9 +611,11 @@ func TestRendererFileTreeRendering(t *testing.T) { // Assert in memory representation is created. assert.Len(t, r.files, 1) - assert.Equal(t, r.files[0].DstPath().absPath(), filepath.Join(tmpDir, "my_directory", "my_file")) + assert.Equal(t, r.files[0].RelPath(), "my_directory/my_file") - err = r.persistToDisk() + out, err := filer.NewLocalClient(tmpDir) + require.NoError(t, err) + err = r.persistToDisk(ctx, out) require.NoError(t, err) // Assert files and directories are correctly materialized. @@ -645,8 +637,7 @@ func TestRendererSubTemplateInPath(t *testing.T) { // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file. 
testutil.Touch(t, filepath.Join(templateDir, "template/{{template `dir_name`}}/{{template `file_name`}}")) - tmpDir := t.TempDir() - r, err := newRenderer(ctx, nil, nil, os.DirFS(templateDir), "template", "library", tmpDir) + r, err := newRenderer(ctx, nil, nil, os.DirFS(templateDir), "template", "library") require.NoError(t, err) err = r.walk() @@ -654,7 +645,6 @@ func TestRendererSubTemplateInPath(t *testing.T) { if assert.Len(t, r.files, 2) { f := r.files[1] - assert.Equal(t, filepath.Join(tmpDir, "my_directory", "my_file"), f.DstPath().absPath()) - assert.Equal(t, "my_directory/my_file", f.DstPath().relPath) + assert.Equal(t, "my_directory/my_file", f.RelPath()) } } From 886e14910cd921e08bc66b90c46192b85d4756bd Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 20 Nov 2024 12:42:23 +0100 Subject: [PATCH 22/42] Fix template initialization when running on Databricks (#1912) ## Changes When running the CLI on Databricks Runtime (DBR), use the extension-aware filer to write an instantiated template if the instance path is located in the workspace filesystem. Notebooks cannot be written through the workspace filesystem's FUSE mount. As a result, this is the only method for initializing templates that contain notebooks when running the CLI on DBR and writing to the workspace filesystem. Depends on #1910 and #1911. Supersedes #1744. ## Tests * Manually confirmed I can initialize a template with notebooks when running the CLI from the web terminal. --- cmd/bundle/init.go | 32 +++++++++++++++++++++++++++++-- internal/bundle/helpers.go | 5 ++++- libs/template/materialize.go | 11 +++-------- libs/template/materialize_test.go | 2 +- 4 files changed, 38 insertions(+), 12 deletions(-) diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index d31a702a1..687c141ec 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -1,6 +1,7 @@ package bundle import ( + "context" "errors" "fmt" "io/fs" @@ -11,6 +12,8 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/template" "github.com/spf13/cobra" @@ -147,6 +150,26 @@ func repoName(url string) string { return parts[len(parts)-1] } +func constructOutputFiler(ctx context.Context, outputDir string) (filer.Filer, error) { + outputDir, err := filepath.Abs(outputDir) + if err != nil { + return nil, err + } + + // If the CLI is running on DBR and we're writing to the workspace file system, + // use the extension-aware workspace filesystem filer to instantiate the template. + // + // It is not possible to write notebooks through the workspace filesystem's FUSE mount. + // Therefore this is the only way we can initialize templates that contain notebooks + // when running the CLI on DBR and initializing a template to the workspace. 
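+ // For example (illustrative paths): with an output directory of
+ // "/Workspace/Users/someone@example.com/my_project" while running on DBR,
+ // the extension-aware client is used so that notebooks in the template can
+ // be written; with a local output directory such as "/tmp/my_project", the
+ // plain local filer below is used instead.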
+ // + if strings.HasPrefix(outputDir, "/Workspace/") && dbr.RunsOnRuntime(ctx) { + return filer.NewWorkspaceFilesExtensionsClient(root.WorkspaceClient(ctx), outputDir) + } + + return filer.NewLocalClient(outputDir) +} + func newInitCommand() *cobra.Command { cmd := &cobra.Command{ Use: "init [TEMPLATE_PATH]", @@ -201,6 +224,11 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf templatePath = getNativeTemplateByDescription(description) } + outputFiler, err := constructOutputFiler(ctx, outputDir) + if err != nil { + return err + } + if templatePath == customTemplate { cmdio.LogString(ctx, "Please specify a path or Git repository to use a custom template.") cmdio.LogString(ctx, "See https://docs.databricks.com/en/dev-tools/bundles/templates.html to learn more about custom templates.") @@ -230,7 +258,7 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf // skip downloading the repo because input arg is not a URL. We assume // it's a path on the local file system in that case - return template.Materialize(ctx, configFile, templateFS, outputDir) + return template.Materialize(ctx, configFile, templateFS, outputFiler) } // Create a temporary directory with the name of the repository. The '*' @@ -255,7 +283,7 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf // Clean up downloaded repository once the template is materialized. defer os.RemoveAll(repoDir) templateFS := os.DirFS(filepath.Join(repoDir, templateDir)) - return template.Materialize(ctx, configFile, templateFS, outputDir) + return template.Materialize(ctx, configFile, templateFS, outputFiler) } return cmd } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 9740061ec..dd9c841c9 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -16,6 +16,7 @@ import ( "github.com/databricks/cli/internal" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/template" "github.com/databricks/cli/libs/vfs" @@ -42,7 +43,9 @@ func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateN cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) - err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), bundleRoot) + out, err := filer.NewLocalClient(bundleRoot) + require.NoError(t, err) + err = template.Materialize(ctx, configFilePath, os.DirFS(templateRoot), out) return bundleRoot, err } diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 8338e119e..ee30444a5 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -21,8 +21,8 @@ const schemaFileName = "databricks_template_schema.json" // ctx: context containing a cmdio object. 
This is used to prompt the user // configFilePath: file path containing user defined config values // templateFS: root of the template definition -// outputDir: root of directory where to initialize the template -func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, outputDir string) error { +// outputFiler: filer to use for writing the initialized template +func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, outputFiler filer.Filer) error { if _, err := fs.Stat(templateFS, schemaFileName); errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaFileName) } @@ -73,12 +73,7 @@ func Materialize(ctx context.Context, configFilePath string, templateFS fs.FS, o return err } - out, err := filer.NewLocalClient(outputDir) - if err != nil { - return err - } - - err = r.persistToDisk(ctx, out) + err = r.persistToDisk(ctx, outputFiler) if err != nil { return err } diff --git a/libs/template/materialize_test.go b/libs/template/materialize_test.go index dc510a30d..f7cd916e3 100644 --- a/libs/template/materialize_test.go +++ b/libs/template/materialize_test.go @@ -19,6 +19,6 @@ func TestMaterializeForNonTemplateDirectory(t *testing.T) { ctx := root.SetWorkspaceClient(context.Background(), w) // Try to materialize a non-template directory. - err = Materialize(ctx, "", os.DirFS(tmpDir), "") + err = Materialize(ctx, "", os.DirFS(tmpDir), nil) assert.EqualError(t, err, fmt.Sprintf("not a bundle template: expected to find a template schema file at %s", schemaFileName)) } From 756e55fabceaf91669a8df682562712a3162da53 Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Wed, 20 Nov 2024 13:22:27 +0100 Subject: [PATCH 23/42] Source-linked deployments for bundles in the workspace (#1884) ## Changes This change adds a preset for source-linked deployments. It is enabled by default for targets in `development` mode **if** the Databricks CLI is running from the `/Workspace` directory on DBR. It does not have an effect when running the CLI anywhere else. Key highlights: 1. Files in this mode won't be uploaded to workspace 2. Created resources will use references to source files instead of their workspace copies ## Tests 1. Apply preset unit test covering conditional logic 2. 
High-level process target mode unit test for testing integration between mutators --------- Co-authored-by: Pieter Noordhuis --- bundle/config/mutator/apply_presets.go | 10 ++ bundle/config/mutator/apply_presets_test.go | 85 +++++++++ bundle/config/mutator/process_target_mode.go | 9 + .../mutator/process_target_mode_test.go | 33 ++++ bundle/config/mutator/translate_paths.go | 10 +- bundle/config/mutator/translate_paths_test.go | 161 ++++++++++++++++++ bundle/config/presets.go | 5 + bundle/deploy/files/upload.go | 6 + bundle/trampoline/python_dbr_warning.go | 4 + 9 files changed, 321 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 59b8547be..9cec704e6 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/textutil" @@ -221,6 +222,15 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos dashboard.DisplayName = prefix + dashboard.DisplayName } + if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { + isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") + if !isDatabricksWorkspace { + disabled := false + b.Config.Presets.SourceLinkedDeployment = &disabled + diags = diags.Extend(diag.Warningf("source-linked deployment is available only in the Databricks Workspace")) + } + } + return diags } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 24295da48..f11a45d63 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -2,12 +2,14 @@ package mutator_test import ( "context" + "runtime" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -364,3 +366,86 @@ func TestApplyPresetsResourceNotDefined(t *testing.T) { }) } } + +func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") + } + + testContext := context.Background() + enabled := true + disabled := false + workspacePath := "/Workspace/user.name@company.com" + + tests := []struct { + bundlePath string + ctx context.Context + name string + initialValue *bool + expectedValue *bool + expectedWarning string + }{ + { + name: "preset enabled, bundle in Workspace, databricks runtime", + bundlePath: workspacePath, + ctx: dbr.MockRuntime(testContext, true), + initialValue: &enabled, + expectedValue: &enabled, + }, + { + name: "preset enabled, bundle not in Workspace, databricks runtime", + bundlePath: "/Users/user.name@company.com", + ctx: dbr.MockRuntime(testContext, true), + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset enabled, bundle in Workspace, not databricks runtime", + bundlePath: 
workspacePath, + ctx: dbr.MockRuntime(testContext, false), + initialValue: &enabled, + expectedValue: &disabled, + expectedWarning: "source-linked deployment is available only in the Databricks Workspace", + }, + { + name: "preset disabled, bundle in Workspace, databricks runtime", + bundlePath: workspacePath, + ctx: dbr.MockRuntime(testContext, true), + initialValue: &disabled, + expectedValue: &disabled, + }, + { + name: "preset nil, bundle in Workspace, databricks runtime", + bundlePath: workspacePath, + ctx: dbr.MockRuntime(testContext, true), + initialValue: nil, + expectedValue: nil, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + SyncRootPath: tt.bundlePath, + Config: config.Root{ + Presets: config.Presets{ + SourceLinkedDeployment: tt.initialValue, + }, + }, + } + + diags := bundle.Apply(tt.ctx, b, mutator.ApplyPresets()) + if diags.HasError() { + t.Fatalf("unexpected error: %v", diags) + } + + if tt.expectedWarning != "" { + require.Equal(t, tt.expectedWarning, diags[0].Summary) + } + + require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) + }) + } + +} diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 44b53681d..df0136fad 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/iamutil" @@ -57,6 +58,14 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) { t.TriggerPauseStatus = config.Paused } + if !config.IsExplicitlyDisabled(t.SourceLinkedDeployment) { + isInWorkspace := strings.HasPrefix(b.SyncRootPath, "/Workspace/") + if isInWorkspace && dbr.RunsOnRuntime(ctx) { + enabled := true + t.SourceLinkedDeployment = &enabled + } + } + if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) { enabled := true t.PipelinesDevelopment = &enabled diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 4135d5fdf..c5ea9adea 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -3,14 +3,17 @@ package mutator import ( "context" "reflect" + "runtime" "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/dbr" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" + "github.com/databricks/cli/libs/vfs" sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" @@ -140,6 +143,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, }, }, + SyncRoot: vfs.MustNew("/Users/lennart.kats@databricks.com"), // Use AWS implementation for testing. 
Tagging: tags.ForCloud(&sdkconfig.Config{ Host: "https://company.cloud.databricks.com", @@ -522,3 +526,32 @@ func TestPipelinesDevelopmentDisabled(t *testing.T) { assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) } + +func TestSourceLinkedDeploymentEnabled(t *testing.T) { + b, diags := processSourceLinkedBundle(t, true) + require.NoError(t, diags.Error()) + assert.True(t, *b.Config.Presets.SourceLinkedDeployment) +} + +func TestSourceLinkedDeploymentDisabled(t *testing.T) { + b, diags := processSourceLinkedBundle(t, false) + require.NoError(t, diags.Error()) + assert.False(t, *b.Config.Presets.SourceLinkedDeployment) +} + +func processSourceLinkedBundle(t *testing.T, presetEnabled bool) (*bundle.Bundle, diag.Diagnostics) { + if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") + } + + b := mockBundle(config.Development) + + workspacePath := "/Workspace/lennart@company.com/" + b.SyncRootPath = workspacePath + b.Config.Presets.SourceLinkedDeployment = &presetEnabled + + ctx := dbr.MockRuntime(context.Background(), true) + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(ctx, b, m) + return b, diags +} diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 321fa5b30..1e2484c79 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -11,6 +11,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/notebook" @@ -103,8 +104,13 @@ func (t *translateContext) rewritePath( return fmt.Errorf("path %s is not contained in sync root path", localPath) } - // Prefix remote path with its remote root path. - remotePath := path.Join(t.b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) + var workspacePath string + if config.IsExplicitlyEnabled(t.b.Config.Presets.SourceLinkedDeployment) { + workspacePath = t.b.SyncRootPath + } else { + workspacePath = t.b.Config.Workspace.FilePath + } + remotePath := path.Join(workspacePath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. 
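 // For example (illustrative values): with source-linked deployment enabled
 // and a sync root of "/Workspace/Users/someone@example.com/my_project", a
 // local file "src/nb.py" resolves to the remote path
 // "/Workspace/Users/someone@example.com/my_project/src/nb.py" (the source
 // file itself) instead of "${workspace.file_path}/src/nb.py". The preset
 // can also be set explicitly in databricks.yml (illustrative snippet):
 //
 //	presets:
 //	  source_linked_deployment: true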
interp, err := fn(*p, localPath, localRelPath, remotePath) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 9d655b27b..a2032f81d 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -4,6 +4,7 @@ import ( "context" "os" "path/filepath" + "runtime" "strings" "testing" @@ -787,3 +788,163 @@ func TestTranslatePathWithComplexVariables(t *testing.T) { b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, ) } + +func TestTranslatePathsWithSourceLinkedDeployment(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("this test is not applicable on Windows because source-linked mode works only in the Databricks Workspace") + } + + dir := t.TempDir() + touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py")) + touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py")) + touchEmptyFile(t, filepath.Join(dir, "my_python_file.py")) + touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) + touchEmptyFile(t, filepath.Join(dir, "requirements.txt")) + + enabled := true + b := &bundle.Bundle{ + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), + Config: config.Root{ + Workspace: config.Workspace{ + FilePath: "/bundle", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "my_job_notebook.py", + }, + Libraries: []compute.Library{ + {Whl: "./dist/task.whl"}, + }, + }, + { + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "/Users/jane.doe@databricks.com/absolute_remote.py", + }, + }, + { + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "my_job_notebook.py", + }, + Libraries: []compute.Library{ + {Requirements: "requirements.txt"}, + }, + }, + { + SparkPythonTask: &jobs.SparkPythonTask{ + PythonFile: "my_python_file.py", + }, + }, + { + SparkJarTask: &jobs.SparkJarTask{ + MainClassName: "HelloWorld", + }, + Libraries: []compute.Library{ + {Jar: "./dist/task.jar"}, + }, + }, + { + SparkJarTask: &jobs.SparkJarTask{ + MainClassName: "HelloWorldRemote", + }, + Libraries: []compute.Library{ + {Jar: "dbfs:/bundle/dist/task_remote.jar"}, + }, + }, + }, + }, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline": { + PipelineSpec: &pipelines.PipelineSpec{ + Libraries: []pipelines.PipelineLibrary{ + { + Notebook: &pipelines.NotebookLibrary{ + Path: "my_pipeline_notebook.py", + }, + }, + { + Notebook: &pipelines.NotebookLibrary{ + Path: "/Users/jane.doe@databricks.com/absolute_remote.py", + }, + }, + { + File: &pipelines.FileLibrary{ + Path: "my_python_file.py", + }, + }, + }, + }, + }, + }, + }, + Presets: config.Presets{ + SourceLinkedDeployment: &enabled, + }, + }, + } + + bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "resource.yml")}}) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) + + // updated to source path + assert.Equal( + t, + filepath.Join(dir, "my_job_notebook"), + b.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath, + ) + assert.Equal( + t, + filepath.Join(dir, "requirements.txt"), + b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements, + ) + assert.Equal( + t, + filepath.Join(dir, "my_python_file.py"), + b.Config.Resources.Jobs["job"].Tasks[3].SparkPythonTask.PythonFile, + ) + assert.Equal( + t, + filepath.Join(dir, "my_pipeline_notebook"), + 
b.Config.Resources.Pipelines["pipeline"].Libraries[0].Notebook.Path, + ) + assert.Equal( + t, + filepath.Join(dir, "my_python_file.py"), + b.Config.Resources.Pipelines["pipeline"].Libraries[2].File.Path, + ) + + // left as is + assert.Equal( + t, + filepath.Join("dist", "task.whl"), + b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, + ) + assert.Equal( + t, + "/Users/jane.doe@databricks.com/absolute_remote.py", + b.Config.Resources.Jobs["job"].Tasks[1].NotebookTask.NotebookPath, + ) + assert.Equal( + t, + filepath.Join("dist", "task.jar"), + b.Config.Resources.Jobs["job"].Tasks[4].Libraries[0].Jar, + ) + assert.Equal( + t, + "dbfs:/bundle/dist/task_remote.jar", + b.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar, + ) + assert.Equal( + t, + "/Users/jane.doe@databricks.com/absolute_remote.py", + b.Config.Resources.Pipelines["pipeline"].Libraries[1].Notebook.Path, + ) +} diff --git a/bundle/config/presets.go b/bundle/config/presets.go index 61009a252..30f56c0f8 100644 --- a/bundle/config/presets.go +++ b/bundle/config/presets.go @@ -17,6 +17,11 @@ type Presets struct { // JobsMaxConcurrentRuns is the default value for the max concurrent runs of jobs. JobsMaxConcurrentRuns int `json:"jobs_max_concurrent_runs,omitempty"` + // SourceLinkedDeployment indicates whether source-linked deployment is enabled. Works only in Databricks Workspace + // When set to true, resources created during deployment will point to source files in the workspace instead of their workspace copies. + // File synchronization to ${workspace.file_path} is skipped. + SourceLinkedDeployment *bool `json:"source_linked_deployment,omitempty"` + // Tags to add to all resources. Tags map[string]string `json:"tags,omitempty"` } diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index bab4e176c..452850dc4 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -7,6 +7,7 @@ import ( "io/fs" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/permissions" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" @@ -23,6 +24,11 @@ func (m *upload) Name() string { } func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + cmdio.LogString(ctx, "Source-linked deployment is enabled. Deployed resources reference the source files in your working tree instead of separate copies.") + return nil + } + cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b)) if err != nil { diff --git a/bundle/trampoline/python_dbr_warning.go b/bundle/trampoline/python_dbr_warning.go index f62e9eab4..cf3e9aeb3 100644 --- a/bundle/trampoline/python_dbr_warning.go +++ b/bundle/trampoline/python_dbr_warning.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" @@ -22,6 +23,9 @@ func WrapperWarning() bundle.Mutator { func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if isPythonWheelWrapperOn(b) { + if config.IsExplicitlyEnabled(b.Config.Presets.SourceLinkedDeployment) { + return diag.Warningf("Python wheel notebook wrapper is not available when using source-linked deployment mode. 
You can disable this mode by setting 'presets.source_linked_deployment: false'") + } return nil } From fab3e8f168c4fa8811991f407ff64232249f84d1 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 20 Nov 2024 13:20:39 +0100 Subject: [PATCH 24/42] Added integration test to deploy bundle to /Shared root path (#1914) ## Changes Added integration test to deploy bundle to /Shared root path ## Tests ``` --- PASS: TestAccDeployBasicToSharedWorkspace (24.58s) PASS coverage: 31.2% of statements in ./... ok github.com/databricks/cli/internal/bundle 25.572s coverage: 31.2% of statements in ./... ``` --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- .../basic/databricks_template_schema.json | 5 +++ .../basic/template/databricks.yml.tmpl | 4 ++ internal/bundle/deploy_to_shared_test.go | 38 +++++++++++++++++++ 3 files changed, 47 insertions(+) create mode 100644 internal/bundle/deploy_to_shared_test.go diff --git a/internal/bundle/bundles/basic/databricks_template_schema.json b/internal/bundle/bundles/basic/databricks_template_schema.json index c1c5cf12e..41a723b0f 100644 --- a/internal/bundle/bundles/basic/databricks_template_schema.json +++ b/internal/bundle/bundles/basic/databricks_template_schema.json @@ -11,6 +11,11 @@ "node_type_id": { "type": "string", "description": "Node type id for job cluster" + }, + "root_path": { + "type": "string", + "description": "Root path to deploy bundle to", + "default": "" } } } diff --git a/internal/bundle/bundles/basic/template/databricks.yml.tmpl b/internal/bundle/bundles/basic/template/databricks.yml.tmpl index a88cbd30e..0eca4231d 100644 --- a/internal/bundle/bundles/basic/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/basic/template/databricks.yml.tmpl @@ -2,7 +2,11 @@ bundle: name: basic workspace: + {{ if .root_path }} + root_path: "{{.root_path}}/.bundle/{{.unique_id}}" + {{ else }} root_path: "~/.bundle/{{.unique_id}}" + {{ end }} resources: jobs: diff --git a/internal/bundle/deploy_to_shared_test.go b/internal/bundle/deploy_to_shared_test.go new file mode 100644 index 000000000..568c1fb56 --- /dev/null +++ b/internal/bundle/deploy_to_shared_test.go @@ -0,0 +1,38 @@ +package bundle + +import ( + "fmt" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccDeployBasicToSharedWorkspacePath(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + uniqueId := uuid.New().String() + + currentUser, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + "root_path": fmt.Sprintf("/Shared/%s", currentUser.UserName), + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(wt.T, ctx, bundleRoot) + require.NoError(wt.T, err) + }) + + err = deployBundle(wt.T, ctx, bundleRoot) + require.NoError(wt.T, err) +} From 592e1111b77ca9d4e2ecdfe63e40a8dbeef544d8 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 20 Nov 2024 13:53:25 +0100 Subject: [PATCH 25/42] Update filenames used by bundle generate to use `..yml` (#1901) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes Update filenames used by bundle generate to use '.resource-type.yml' Similar to 
[Add sub-extension to resource files in built-in templates by shreyas-goenka · Pull Request #1777 · databricks/cli](https://github.com/databricks/cli/pull/1777) --------- Co-authored-by: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> --- cmd/bundle/generate/generate_test.go | 122 +++++++++++++++++++++++++- cmd/bundle/generate/job.go | 14 ++- cmd/bundle/generate/pipeline.go | 14 ++- internal/bundle/bind_resource_test.go | 2 +- 4 files changed, 147 insertions(+), 5 deletions(-) diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index 943f721c9..bc1549e64 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -3,8 +3,10 @@ package generate import ( "bytes" "context" + "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "testing" @@ -90,7 +92,7 @@ func TestGeneratePipelineCommand(t *testing.T) { err := cmd.RunE(cmd, []string{}) require.NoError(t, err) - data, err := os.ReadFile(filepath.Join(configDir, "test_pipeline.yml")) + data, err := os.ReadFile(filepath.Join(configDir, "test_pipeline.pipeline.yml")) require.NoError(t, err) require.Equal(t, fmt.Sprintf(`resources: pipelines: @@ -186,7 +188,123 @@ func TestGenerateJobCommand(t *testing.T) { err := cmd.RunE(cmd, []string{}) require.NoError(t, err) - data, err := os.ReadFile(filepath.Join(configDir, "test_job.yml")) + data, err := os.ReadFile(filepath.Join(configDir, "test_job.job.yml")) + require.NoError(t, err) + + require.Equal(t, fmt.Sprintf(`resources: + jobs: + test_job: + name: test-job + job_clusters: + - new_cluster: + custom_tags: + "Tag1": "24X7-1234" + - new_cluster: + spark_conf: + "spark.databricks.delta.preview.enabled": "true" + tasks: + - task_key: notebook_task + notebook_task: + notebook_path: %s + parameters: + - name: empty + default: "" +`, filepath.Join("..", "src", "notebook.py")), string(data)) + + data, err = os.ReadFile(filepath.Join(srcDir, "notebook.py")) + require.NoError(t, err) + require.Equal(t, "# Databricks notebook source\nNotebook content", string(data)) +} + +func touchEmptyFile(t *testing.T, path string) { + err := os.MkdirAll(filepath.Dir(path), 0700) + require.NoError(t, err) + f, err := os.Create(path) + require.NoError(t, err) + f.Close() +} + +func TestGenerateJobCommandOldFileRename(t *testing.T) { + cmd := NewGenerateJobCommand() + + root := t.TempDir() + b := &bundle.Bundle{ + BundleRootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobsApi := m.GetMockJobsAPI() + jobsApi.EXPECT().Get(mock.Anything, jobs.GetJobRequest{JobId: 1234}).Return(&jobs.Job{ + Settings: &jobs.JobSettings{ + Name: "test-job", + JobClusters: []jobs.JobCluster{ + {NewCluster: compute.ClusterSpec{ + CustomTags: map[string]string{ + "Tag1": "24X7-1234", + }, + }}, + {NewCluster: compute.ClusterSpec{ + SparkConf: map[string]string{ + "spark.databricks.delta.preview.enabled": "true", + }, + }}, + }, + Tasks: []jobs.Task{ + { + TaskKey: "notebook_task", + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "/test/notebook", + }, + }, + }, + Parameters: []jobs.JobParameterDefinition{ + { + Name: "empty", + Default: "", + }, + }, + }, + }, nil) + + workspaceApi := m.GetMockWorkspaceAPI() + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/test/notebook").Return(&workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeNotebook, + Language: workspace.LanguagePython, + Path: "/test/notebook", + }, nil) + + notebookContent := io.NopCloser(bytes.NewBufferString("# Databricks notebook 
source\nNotebook content")) + workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) + + cmd.SetContext(bundle.Context(context.Background(), b)) + cmd.Flag("existing-job-id").Value.Set("1234") + + configDir := filepath.Join(root, "resources") + cmd.Flag("config-dir").Value.Set(configDir) + + srcDir := filepath.Join(root, "src") + cmd.Flag("source-dir").Value.Set(srcDir) + + var key string + cmd.Flags().StringVar(&key, "key", "test_job", "") + + // Create an old generated file first + oldFilename := filepath.Join(configDir, "test_job.yml") + touchEmptyFile(t, oldFilename) + + // Having an existing files require --force flag to regenerate them + cmd.Flag("force").Value.Set("true") + + err := cmd.RunE(cmd, []string{}) + require.NoError(t, err) + + // Make sure file do not exists after the run + _, err = os.Stat(oldFilename) + require.True(t, errors.Is(err, fs.ErrNotExist)) + + data, err := os.ReadFile(filepath.Join(configDir, "test_job.job.yml")) require.NoError(t, err) require.Equal(t, fmt.Sprintf(`resources: diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index 99bc61660..9ac41e3cb 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -1,7 +1,9 @@ package generate import ( + "errors" "fmt" + "io/fs" "os" "path/filepath" @@ -83,7 +85,17 @@ func NewGenerateJobCommand() *cobra.Command { return err } - filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) + oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) + filename := filepath.Join(configDir, fmt.Sprintf("%s.job.yml", jobKey)) + + // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. + // Due to changing in the generated file names, we need to first rename existing resource file to the new name. + // Otherwise users can end up with duplicated resources. + err = os.Rename(oldFilename, filename) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to rename file %s. DABs uses the resource type as a sub-extension for generated content, please rename it to %s, err: %w", oldFilename, filename, err) + } + saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ // Including all JobSettings and nested fields which are map[string]string type "spark_conf": yaml.DoubleQuotedStyle, diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index bd973fe0b..910baa45f 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -1,7 +1,9 @@ package generate import ( + "errors" "fmt" + "io/fs" "os" "path/filepath" @@ -83,7 +85,17 @@ func NewGeneratePipelineCommand() *cobra.Command { return err } - filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) + oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) + filename := filepath.Join(configDir, fmt.Sprintf("%s.pipeline.yml", pipelineKey)) + + // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI. + // Due to changing in the generated file names, we need to first rename existing resource file to the new name. + // Otherwise users can end up with duplicated resources. + err = os.Rename(oldFilename, filename) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to rename file %s. 
DABs uses the resource type as a sub-extension for generated content, please rename it to %s, err: %w", oldFilename, filename, err) + } + saver := yamlsaver.NewSaverWithStyle( // Including all PipelineSpec and nested fields which are map[string]string type map[string]yaml.Style{ diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go index 2449c31f2..8cc5da536 100644 --- a/internal/bundle/bind_resource_test.go +++ b/internal/bundle/bind_resource_test.go @@ -166,7 +166,7 @@ func TestAccGenerateAndBind(t *testing.T) { _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) require.NoError(t, err) - matches, err := filepath.Glob(filepath.Join(bundleRoot, "resources", "test_job_key.yml")) + matches, err := filepath.Glob(filepath.Join(bundleRoot, "resources", "test_job_key.job.yml")) require.NoError(t, err) require.Len(t, matches, 1) From ade95d96493fe36a5acec754ad26b2890c5fc902 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 20 Nov 2024 15:48:18 +0100 Subject: [PATCH 26/42] [Release] Release v0.235.0 (#1918) **Note:** the `bundle generate` command now uses the `..yml` sub-extension for the configuration files it writes. Existing configuration files that do not use this sub-extension are renamed to include it. Bundles: * Make `TableName` field part of quality monitor schema ([#1903](https://github.com/databricks/cli/pull/1903)). * Do not prepend paths starting with ~ or variable reference ([#1905](https://github.com/databricks/cli/pull/1905)). * Fix workspace extensions filer accidentally reading notebooks ([#1891](https://github.com/databricks/cli/pull/1891)). * Fix template initialization when running on Databricks ([#1912](https://github.com/databricks/cli/pull/1912)). * Source-linked deployments for bundles in the workspace ([#1884](https://github.com/databricks/cli/pull/1884)). * Added integration test to deploy bundle to /Shared root path ([#1914](https://github.com/databricks/cli/pull/1914)). * Update filenames used by bundle generate to use `..yml` ([#1901](https://github.com/databricks/cli/pull/1901)). Internal: * Extract functionality to detect if the CLI is running on DBR ([#1889](https://github.com/databricks/cli/pull/1889)). * Consolidate test helpers for `io/fs` ([#1906](https://github.com/databricks/cli/pull/1906)). * Use `fs.FS` interface to read template ([#1910](https://github.com/databricks/cli/pull/1910)). * Use `filer.Filer` to write template instantiation ([#1911](https://github.com/databricks/cli/pull/1911)). --- CHANGELOG.md | 23 +++++++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index e5b6496bd..f2645b218 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Version changelog +## [Release] Release v0.235.0 + +**Note:** the `bundle generate` command now uses the `..yml` +sub-extension for the configuration files it writes. Existing configuration +files that do not use this sub-extension are renamed to include it. + +Bundles: + * Make `TableName` field part of quality monitor schema ([#1903](https://github.com/databricks/cli/pull/1903)). + * Do not prepend paths starting with ~ or variable reference ([#1905](https://github.com/databricks/cli/pull/1905)). + * Fix workspace extensions filer accidentally reading notebooks ([#1891](https://github.com/databricks/cli/pull/1891)). + * Fix template initialization when running on Databricks ([#1912](https://github.com/databricks/cli/pull/1912)). 
+ * Source-linked deployments for bundles in the workspace ([#1884](https://github.com/databricks/cli/pull/1884)). + * Added integration test to deploy bundle to /Shared root path ([#1914](https://github.com/databricks/cli/pull/1914)). + * Update filenames used by bundle generate to use `..yml` ([#1901](https://github.com/databricks/cli/pull/1901)). + +Internal: + * Extract functionality to detect if the CLI is running on DBR ([#1889](https://github.com/databricks/cli/pull/1889)). + * Consolidate test helpers for `io/fs` ([#1906](https://github.com/databricks/cli/pull/1906)). + * Use `fs.FS` interface to read template ([#1910](https://github.com/databricks/cli/pull/1910)). + * Use `filer.Filer` to write template instantiation ([#1911](https://github.com/databricks/cli/pull/1911)). + + + ## [Release] Release v0.234.0 Bundles: From 984c38e03ec0b3915ac4d05ff08272ada74f9f6b Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 20 Nov 2024 22:00:10 +0530 Subject: [PATCH 27/42] Add unique ID to `root_path` for bundle integration test fixtures (#1917) ## Changes Integration tests using these fixtures could have been flaky when run in parallel using the same user's identity. They would also possibly have piggybacked state from previous runs. This PR adds a UUID to the root_path to force independent bundle deployments for every test run. I have checked that all bundles in `internal/bundle/bundles` have `root_path` namespaced to a UUID. ## Tests Self testing. --- internal/bundle/bundles/empty_bundle/databricks.yml | 2 -- .../bundles/recreate_pipeline/template/databricks.yml.tmpl | 5 ++++- .../bundle/bundles/uc_schema/template/databricks.yml.tmpl | 5 ++++- 3 files changed, 8 insertions(+), 4 deletions(-) delete mode 100644 internal/bundle/bundles/empty_bundle/databricks.yml diff --git a/internal/bundle/bundles/empty_bundle/databricks.yml b/internal/bundle/bundles/empty_bundle/databricks.yml deleted file mode 100644 index efc627820..000000000 --- a/internal/bundle/bundles/empty_bundle/databricks.yml +++ /dev/null @@ -1,2 +0,0 @@ -bundle: - name: abc diff --git a/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl b/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl index 10350f13e..4ebeb2655 100644 --- a/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/recreate_pipeline/template/databricks.yml.tmpl @@ -1,5 +1,8 @@ bundle: - name: "bundle-playground" + name: recreate-pipeline + +workspace: + root_path: "~/.bundle/{{.unique_id}}" variables: catalog: diff --git a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl b/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl index 961af25e8..15076ac85 100644 --- a/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/uc_schema/template/databricks.yml.tmpl @@ -1,5 +1,8 @@ bundle: - name: "bundle-playground" + name: uc-schema + +workspace: + root_path: "~/.bundle/{{.unique_id}}" resources: pipelines: From 14fe03dcb95c65cd4d6bb3e18d17108888d9b6c0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 21 Nov 2024 11:28:50 +0100 Subject: [PATCH 28/42] Breakout variable lookup into separate files and tests (#1921) ## Changes While looking into adding variable lookups for notification destinations ([API][API]), I found the codegen approach for different classes of variable lookups a bit complex. 
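For reference, the refactor replaces the generated code with one small hand-written resolver per entity type; the following minimal sketch is modeled on the generated warehouse lookup that this PR removes (the actual `resolve_warehouse.go` may differ in detail):

```go
package variable

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
)

// resolveWarehouse resolves a SQL warehouse name to its ID.
type resolveWarehouse struct {
	name string
}

func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) {
	entity, err := w.Warehouses.GetByName(ctx, l.name)
	if err != nil {
		return "", err
	}
	return fmt.Sprint(entity.Id), nil
}

func (l resolveWarehouse) String() string {
	return fmt.Sprintf("warehouse: %s", l.name)
}
```

Each of these types implements the two-method `resolver` interface added to `lookup.go` in this change.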
The template had a custom field override (for service principals), the package had an override for the cluster lookup, and it didn't produce tests. The notification destinations API uses a default page size of 20 for listing. I want to use a larger page size to limit the number of API calls, so that would imply another customization on the template or a manual override. This code being rather mechanical, I used copilot to produce all instances of the resolvers and their tests (after writing one of them manually). [api]: https://docs.databricks.com/api/workspace/notificationdestinations ## Tests * Unit tests pass * Manual confirmation that lookups of warehouses still work --- .codegen.json | 3 +- .codegen/lookup.go.tmpl | 134 ------- .gitattributes | 1 - bundle/config/variable/lookup.go | 347 +++--------------- bundle/config/variable/lookup_test.go | 60 +++ bundle/config/variable/resolve_alert.go | 24 ++ bundle/config/variable/resolve_alert_test.go | 49 +++ ...lookup_overrides.go => resolve_cluster.go} | 12 +- .../config/variable/resolve_cluster_policy.go | 24 ++ .../variable/resolve_cluster_policy_test.go | 49 +++ .../config/variable/resolve_cluster_test.go | 50 +++ bundle/config/variable/resolve_dashboard.go | 24 ++ .../config/variable/resolve_dashboard_test.go | 49 +++ .../config/variable/resolve_instance_pool.go | 24 ++ .../variable/resolve_instance_pool_test.go | 49 +++ bundle/config/variable/resolve_job.go | 24 ++ bundle/config/variable/resolve_job_test.go | 49 +++ bundle/config/variable/resolve_metastore.go | 24 ++ .../config/variable/resolve_metastore_test.go | 49 +++ bundle/config/variable/resolve_pipeline.go | 24 ++ .../config/variable/resolve_pipeline_test.go | 49 +++ bundle/config/variable/resolve_query.go | 24 ++ bundle/config/variable/resolve_query_test.go | 49 +++ .../variable/resolve_service_principal.go | 24 ++ .../resolve_service_principal_test.go | 49 +++ bundle/config/variable/resolve_warehouse.go | 24 ++ .../config/variable/resolve_warehouse_test.go | 49 +++ 27 files changed, 898 insertions(+), 439 deletions(-) delete mode 100644 .codegen/lookup.go.tmpl create mode 100644 bundle/config/variable/lookup_test.go create mode 100644 bundle/config/variable/resolve_alert.go create mode 100644 bundle/config/variable/resolve_alert_test.go rename bundle/config/variable/{lookup_overrides.go => resolve_cluster.go} (81%) create mode 100644 bundle/config/variable/resolve_cluster_policy.go create mode 100644 bundle/config/variable/resolve_cluster_policy_test.go create mode 100644 bundle/config/variable/resolve_cluster_test.go create mode 100644 bundle/config/variable/resolve_dashboard.go create mode 100644 bundle/config/variable/resolve_dashboard_test.go create mode 100644 bundle/config/variable/resolve_instance_pool.go create mode 100644 bundle/config/variable/resolve_instance_pool_test.go create mode 100644 bundle/config/variable/resolve_job.go create mode 100644 bundle/config/variable/resolve_job_test.go create mode 100644 bundle/config/variable/resolve_metastore.go create mode 100644 bundle/config/variable/resolve_metastore_test.go create mode 100644 bundle/config/variable/resolve_pipeline.go create mode 100644 bundle/config/variable/resolve_pipeline_test.go create mode 100644 bundle/config/variable/resolve_query.go create mode 100644 bundle/config/variable/resolve_query_test.go create mode 100644 bundle/config/variable/resolve_service_principal.go create mode 100644 bundle/config/variable/resolve_service_principal_test.go create mode 100644 
bundle/config/variable/resolve_warehouse.go create mode 100644 bundle/config/variable/resolve_warehouse_test.go diff --git a/.codegen.json b/.codegen.json index 4524ab55d..73ab8c2a4 100644 --- a/.codegen.json +++ b/.codegen.json @@ -5,8 +5,7 @@ }, "batch": { ".codegen/cmds-workspace.go.tmpl": "cmd/workspace/cmd.go", - ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go", - ".codegen/lookup.go.tmpl": "bundle/config/variable/lookup.go" + ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go" }, "toolchain": { "required": ["go"], diff --git a/.codegen/lookup.go.tmpl b/.codegen/lookup.go.tmpl deleted file mode 100644 index 124b629d0..000000000 --- a/.codegen/lookup.go.tmpl +++ /dev/null @@ -1,134 +0,0 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - -package variable - -{{ $allowlist := - list - "alerts" - "clusters" - "cluster-policies" - "clusters" - "dashboards" - "instance-pools" - "jobs" - "metastores" - "pipelines" - "service-principals" - "queries" - "warehouses" -}} - -{{ $customField := - dict - "service-principals" "ApplicationId" -}} - -import ( - "context" - "fmt" - - "github.com/databricks/databricks-sdk-go" -) - -type Lookup struct { - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - {{.Singular.PascalName}} string `json:"{{.Singular.SnakeName}},omitempty"` - - {{end}} - {{- end}} -} - -func LookupFromMap(m map[string]any) *Lookup { - l := &Lookup{} - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - if v, ok := m["{{.Singular.SnakeName}}"]; ok { - l.{{.Singular.PascalName}} = v.(string) - } - {{end -}} - {{- end}} - return l -} - -func (l *Lookup) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { - if err := l.validate(); err != nil { - return "", err - } - - r := allResolvers() - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - if l.{{.Singular.PascalName}} != "" { - return r.{{.Singular.PascalName}}(ctx, w, l.{{.Singular.PascalName}}) - } - {{end -}} - {{- end}} - - return "", fmt.Errorf("no valid lookup fields provided") -} - -func (l *Lookup) String() string { - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - if l.{{.Singular.PascalName}} != "" { - return fmt.Sprintf("{{.Singular.KebabName}}: %s", l.{{.Singular.PascalName}}) - } - {{end -}} - {{- end}} - return "" -} - -func (l *Lookup) validate() error { - // Validate that only one field is set - count := 0 - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - if l.{{.Singular.PascalName}} != "" { - count++ - } - {{end -}} - {{- end}} - - if count != 1 { - return fmt.Errorf("exactly one lookup field must be provided") - } - - if strings.Contains(l.String(), "${var") { - return fmt.Errorf("lookup fields cannot contain variable references") - } - - return nil -} - - -type resolverFunc func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) -type resolvers struct { - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - {{.Singular.PascalName}} resolverFunc - {{end -}} - {{- end}} -} - -func allResolvers() *resolvers { - r := &resolvers{} - {{range .Services -}} - {{- if in $allowlist .KebabName -}} - r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["{{.Singular.PascalName}}"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name) - if err != nil { - return "", err - } - - return 
fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .NamedIdMap.IdPath 0).PascalName) }}), nil - } - {{end -}} - {{- end}} - - return r -} diff --git a/.gitattributes b/.gitattributes index ecb5669ef..2755c02d7 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1,4 +1,3 @@ -bundle/config/variable/lookup.go linguist-generated=true cmd/account/access-control/access-control.go linguist-generated=true cmd/account/billable-usage/billable-usage.go linguist-generated=true cmd/account/budgets/budgets.go linguist-generated=true diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index e40b0ef7a..f8cb67198 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -1,11 +1,8 @@ -// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. - package variable import ( "context" "fmt" - "strings" "github.com/databricks/databricks-sdk-go" ) @@ -34,323 +31,75 @@ type Lookup struct { Warehouse string `json:"warehouse,omitempty"` } -func LookupFromMap(m map[string]any) *Lookup { - l := &Lookup{} - if v, ok := m["alert"]; ok { - l.Alert = v.(string) +type resolver interface { + // Resolve resolves the underlying entity's ID. + Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) + + // String returns a human-readable representation of the resolver. + String() string +} + +func (l *Lookup) constructResolver() (resolver, error) { + var resolvers []resolver + + if l.Alert != "" { + resolvers = append(resolvers, resolveAlert{name: l.Alert}) } - if v, ok := m["cluster_policy"]; ok { - l.ClusterPolicy = v.(string) + if l.ClusterPolicy != "" { + resolvers = append(resolvers, resolveClusterPolicy{name: l.ClusterPolicy}) } - if v, ok := m["cluster"]; ok { - l.Cluster = v.(string) + if l.Cluster != "" { + resolvers = append(resolvers, resolveCluster{name: l.Cluster}) } - if v, ok := m["dashboard"]; ok { - l.Dashboard = v.(string) + if l.Dashboard != "" { + resolvers = append(resolvers, resolveDashboard{name: l.Dashboard}) } - if v, ok := m["instance_pool"]; ok { - l.InstancePool = v.(string) + if l.InstancePool != "" { + resolvers = append(resolvers, resolveInstancePool{name: l.InstancePool}) } - if v, ok := m["job"]; ok { - l.Job = v.(string) + if l.Job != "" { + resolvers = append(resolvers, resolveJob{name: l.Job}) } - if v, ok := m["metastore"]; ok { - l.Metastore = v.(string) + if l.Metastore != "" { + resolvers = append(resolvers, resolveMetastore{name: l.Metastore}) } - if v, ok := m["pipeline"]; ok { - l.Pipeline = v.(string) + if l.Pipeline != "" { + resolvers = append(resolvers, resolvePipeline{name: l.Pipeline}) } - if v, ok := m["query"]; ok { - l.Query = v.(string) + if l.Query != "" { + resolvers = append(resolvers, resolveQuery{name: l.Query}) } - if v, ok := m["service_principal"]; ok { - l.ServicePrincipal = v.(string) + if l.ServicePrincipal != "" { + resolvers = append(resolvers, resolveServicePrincipal{name: l.ServicePrincipal}) } - if v, ok := m["warehouse"]; ok { - l.Warehouse = v.(string) + if l.Warehouse != "" { + resolvers = append(resolvers, resolveWarehouse{name: l.Warehouse}) } - return l + switch len(resolvers) { + case 0: + return nil, fmt.Errorf("no valid lookup fields provided") + case 1: + return resolvers[0], nil + default: + return nil, fmt.Errorf("exactly one lookup field must be provided") + } } func (l *Lookup) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { - if err := l.validate(); err != nil { + r, err := l.constructResolver() + if 
err != nil { return "", err } - r := allResolvers() - if l.Alert != "" { - return r.Alert(ctx, w, l.Alert) - } - if l.ClusterPolicy != "" { - return r.ClusterPolicy(ctx, w, l.ClusterPolicy) - } - if l.Cluster != "" { - return r.Cluster(ctx, w, l.Cluster) - } - if l.Dashboard != "" { - return r.Dashboard(ctx, w, l.Dashboard) - } - if l.InstancePool != "" { - return r.InstancePool(ctx, w, l.InstancePool) - } - if l.Job != "" { - return r.Job(ctx, w, l.Job) - } - if l.Metastore != "" { - return r.Metastore(ctx, w, l.Metastore) - } - if l.Pipeline != "" { - return r.Pipeline(ctx, w, l.Pipeline) - } - if l.Query != "" { - return r.Query(ctx, w, l.Query) - } - if l.ServicePrincipal != "" { - return r.ServicePrincipal(ctx, w, l.ServicePrincipal) - } - if l.Warehouse != "" { - return r.Warehouse(ctx, w, l.Warehouse) - } - - return "", fmt.Errorf("no valid lookup fields provided") + return r.Resolve(ctx, w) } func (l *Lookup) String() string { - if l.Alert != "" { - return fmt.Sprintf("alert: %s", l.Alert) - } - if l.ClusterPolicy != "" { - return fmt.Sprintf("cluster-policy: %s", l.ClusterPolicy) - } - if l.Cluster != "" { - return fmt.Sprintf("cluster: %s", l.Cluster) - } - if l.Dashboard != "" { - return fmt.Sprintf("dashboard: %s", l.Dashboard) - } - if l.InstancePool != "" { - return fmt.Sprintf("instance-pool: %s", l.InstancePool) - } - if l.Job != "" { - return fmt.Sprintf("job: %s", l.Job) - } - if l.Metastore != "" { - return fmt.Sprintf("metastore: %s", l.Metastore) - } - if l.Pipeline != "" { - return fmt.Sprintf("pipeline: %s", l.Pipeline) - } - if l.Query != "" { - return fmt.Sprintf("query: %s", l.Query) - } - if l.ServicePrincipal != "" { - return fmt.Sprintf("service-principal: %s", l.ServicePrincipal) - } - if l.Warehouse != "" { - return fmt.Sprintf("warehouse: %s", l.Warehouse) + r, _ := l.constructResolver() + if r == nil { + return "" } - return "" -} - -func (l *Lookup) validate() error { - // Validate that only one field is set - count := 0 - if l.Alert != "" { - count++ - } - if l.ClusterPolicy != "" { - count++ - } - if l.Cluster != "" { - count++ - } - if l.Dashboard != "" { - count++ - } - if l.InstancePool != "" { - count++ - } - if l.Job != "" { - count++ - } - if l.Metastore != "" { - count++ - } - if l.Pipeline != "" { - count++ - } - if l.Query != "" { - count++ - } - if l.ServicePrincipal != "" { - count++ - } - if l.Warehouse != "" { - count++ - } - - if count != 1 { - return fmt.Errorf("exactly one lookup field must be provided") - } - - if strings.Contains(l.String(), "${var") { - return fmt.Errorf("lookup fields cannot contain variable references") - } - - return nil -} - -type resolverFunc func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) -type resolvers struct { - Alert resolverFunc - ClusterPolicy resolverFunc - Cluster resolverFunc - Dashboard resolverFunc - InstancePool resolverFunc - Job resolverFunc - Metastore resolverFunc - Pipeline resolverFunc - Query resolverFunc - ServicePrincipal resolverFunc - Warehouse resolverFunc -} - -func allResolvers() *resolvers { - r := &resolvers{} - r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Alert"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Alerts.GetByDisplayName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.Id), nil - } - r.ClusterPolicy = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := 
lookupOverrides["ClusterPolicy"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.ClusterPolicies.GetByName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.PolicyId), nil - } - r.Cluster = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Cluster"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Clusters.GetByClusterName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.ClusterId), nil - } - r.Dashboard = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Dashboard"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Dashboards.GetByName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.Id), nil - } - r.InstancePool = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["InstancePool"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.InstancePools.GetByInstancePoolName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.InstancePoolId), nil - } - r.Job = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Job"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Jobs.GetBySettingsName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.JobId), nil - } - r.Metastore = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Metastore"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Metastores.GetByName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.MetastoreId), nil - } - r.Pipeline = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Pipeline"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Pipelines.GetByName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.PipelineId), nil - } - r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Query"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Queries.GetByDisplayName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.Id), nil - } - r.ServicePrincipal = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["ServicePrincipal"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.ServicePrincipals.GetByDisplayName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.ApplicationId), nil - } - r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - fn, ok := lookupOverrides["Warehouse"] - if ok { - return fn(ctx, w, name) - } - entity, err := w.Warehouses.GetByName(ctx, name) - if err != nil { - return "", err - } - - return fmt.Sprint(entity.Id), nil - } - - return r + return r.String() } diff --git a/bundle/config/variable/lookup_test.go b/bundle/config/variable/lookup_test.go new file mode 100644 index 000000000..a84748751 --- /dev/null +++ b/bundle/config/variable/lookup_test.go @@ -0,0 +1,60 @@ +package variable + +import ( + "context" + "reflect" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLookup_Coverage(t *testing.T) { 
+ var lookup Lookup + val := reflect.ValueOf(lookup) + typ := val.Type() + + for i := 0; i < val.NumField(); i++ { + field := val.Field(i) + if field.Kind() != reflect.String { + t.Fatalf("Field %s is not a string", typ.Field(i).Name) + } + + fieldType := typ.Field(i) + t.Run(fieldType.Name, func(t *testing.T) { + // Use a fresh instance of the struct in each test + var lookup Lookup + + // Set the field to a non-empty string + reflect.ValueOf(&lookup).Elem().Field(i).SetString("value") + + // Test the [String] function + assert.NotEmpty(t, lookup.String()) + }) + } +} + +func TestLookup_Empty(t *testing.T) { + var lookup Lookup + + // Resolve returns an error when no fields are provided + _, err := lookup.Resolve(context.Background(), nil) + assert.ErrorContains(t, err, "no valid lookup fields provided") + + // No string representation for an invalid lookup + assert.Empty(t, lookup.String()) + +} + +func TestLookup_Multiple(t *testing.T) { + lookup := Lookup{ + Alert: "alert", + Query: "query", + } + + // Resolve returns an error when multiple fields are provided + _, err := lookup.Resolve(context.Background(), nil) + assert.ErrorContains(t, err, "exactly one lookup field must be provided") + + // No string representation for an invalid lookup + assert.Empty(t, lookup.String()) +} diff --git a/bundle/config/variable/resolve_alert.go b/bundle/config/variable/resolve_alert.go new file mode 100644 index 000000000..be83e81fa --- /dev/null +++ b/bundle/config/variable/resolve_alert.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveAlert struct { + name string +} + +func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Alerts.GetByDisplayName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.Id), nil +} + +func (l resolveAlert) String() string { + return fmt.Sprintf("alert: %s", l.name) +} diff --git a/bundle/config/variable/resolve_alert_test.go b/bundle/config/variable/resolve_alert_test.go new file mode 100644 index 000000000..32f8d641b --- /dev/null +++ b/bundle/config/variable/resolve_alert_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveAlert_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockAlertsAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "alert"). + Return(&sql.ListAlertsResponseAlert{ + Id: "1234", + }, nil) + + ctx := context.Background() + l := resolveAlert{name: "alert"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "1234", result) +} + +func TestResolveAlert_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockAlertsAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "alert"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveAlert{name: "alert"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveAlert_String(t *testing.T) { + l := resolveAlert{name: "name"} + assert.Equal(t, "alert: name", l.String()) +} diff --git a/bundle/config/variable/lookup_overrides.go b/bundle/config/variable/resolve_cluster.go similarity index 81% rename from bundle/config/variable/lookup_overrides.go rename to bundle/config/variable/resolve_cluster.go index 1be373dc6..2d68b7fb7 100644 --- a/bundle/config/variable/lookup_overrides.go +++ b/bundle/config/variable/resolve_cluster.go @@ -8,13 +8,13 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" ) -var lookupOverrides = map[string]resolverFunc{ - "Cluster": resolveCluster, +type resolveCluster struct { + name string } // We added a custom resolver for the cluster to add filtering for the cluster source when we list all clusters. // Without the filtering listing could take a very long time (5-10 mins) which leads to lookup timeouts. -func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { +func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { result, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{ FilterBy: &compute.ListClustersFilterBy{ ClusterSources: []compute.ClusterSource{compute.ClusterSourceApi, compute.ClusterSourceUi}, @@ -30,6 +30,8 @@ func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name str key := v.ClusterName tmp[key] = append(tmp[key], v) } + + name := l.name alternatives, ok := tmp[name] if !ok || len(alternatives) == 0 { return "", fmt.Errorf("cluster named '%s' does not exist", name) @@ -39,3 +41,7 @@ func resolveCluster(ctx context.Context, w *databricks.WorkspaceClient, name str } return alternatives[0].ClusterId, nil } + +func (l resolveCluster) String() string { + return fmt.Sprintf("cluster: %s", l.name) +} diff --git a/bundle/config/variable/resolve_cluster_policy.go b/bundle/config/variable/resolve_cluster_policy.go new file mode 100644 index 000000000..b19380a63 --- /dev/null +++ b/bundle/config/variable/resolve_cluster_policy.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveClusterPolicy struct { + name string +} + +func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.ClusterPolicies.GetByName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.PolicyId), nil +} + +func (l resolveClusterPolicy) String() string { + return fmt.Sprintf("cluster-policy: %s", l.name) +} diff --git a/bundle/config/variable/resolve_cluster_policy_test.go b/bundle/config/variable/resolve_cluster_policy_test.go new file mode 100644 index 000000000..fb17fad18 --- /dev/null +++ b/bundle/config/variable/resolve_cluster_policy_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveClusterPolicy_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := 
m.GetMockClusterPoliciesAPI() + api.EXPECT(). + GetByName(mock.Anything, "policy"). + Return(&compute.Policy{ + PolicyId: "1234", + }, nil) + + ctx := context.Background() + l := resolveClusterPolicy{name: "policy"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "1234", result) +} + +func TestResolveClusterPolicy_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockClusterPoliciesAPI() + api.EXPECT(). + GetByName(mock.Anything, "policy"). + Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveClusterPolicy{name: "policy"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveClusterPolicy_String(t *testing.T) { + l := resolveClusterPolicy{name: "name"} + assert.Equal(t, "cluster-policy: name", l.String()) +} diff --git a/bundle/config/variable/resolve_cluster_test.go b/bundle/config/variable/resolve_cluster_test.go new file mode 100644 index 000000000..2f3aa27cf --- /dev/null +++ b/bundle/config/variable/resolve_cluster_test.go @@ -0,0 +1,50 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveCluster_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockClustersAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). + Return([]compute.ClusterDetails{ + {ClusterId: "1234", ClusterName: "cluster1"}, + {ClusterId: "2345", ClusterName: "cluster2"}, + }, nil) + + ctx := context.Background() + l := resolveCluster{name: "cluster2"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "2345", result) +} + +func TestResolveCluster_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockClustersAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). 
+ Return([]compute.ClusterDetails{}, nil) + + ctx := context.Background() + l := resolveCluster{name: "cluster"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.Error(t, err) + assert.Contains(t, err.Error(), "cluster named 'cluster' does not exist") +} + +func TestResolveCluster_String(t *testing.T) { + l := resolveCluster{name: "name"} + assert.Equal(t, "cluster: name", l.String()) +} diff --git a/bundle/config/variable/resolve_dashboard.go b/bundle/config/variable/resolve_dashboard.go new file mode 100644 index 000000000..44fd45197 --- /dev/null +++ b/bundle/config/variable/resolve_dashboard.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveDashboard struct { + name string +} + +func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Dashboards.GetByName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.Id), nil +} + +func (l resolveDashboard) String() string { + return fmt.Sprintf("dashboard: %s", l.name) +} diff --git a/bundle/config/variable/resolve_dashboard_test.go b/bundle/config/variable/resolve_dashboard_test.go new file mode 100644 index 000000000..3afed4794 --- /dev/null +++ b/bundle/config/variable/resolve_dashboard_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveDashboard_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockDashboardsAPI() + api.EXPECT(). + GetByName(mock.Anything, "dashboard"). + Return(&sql.Dashboard{ + Id: "1234", + }, nil) + + ctx := context.Background() + l := resolveDashboard{name: "dashboard"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "1234", result) +} + +func TestResolveDashboard_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockDashboardsAPI() + api.EXPECT(). + GetByName(mock.Anything, "dashboard"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveDashboard{name: "dashboard"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveDashboard_String(t *testing.T) { + l := resolveDashboard{name: "name"} + assert.Equal(t, "dashboard: name", l.String()) +} diff --git a/bundle/config/variable/resolve_instance_pool.go b/bundle/config/variable/resolve_instance_pool.go new file mode 100644 index 000000000..cbf0775c9 --- /dev/null +++ b/bundle/config/variable/resolve_instance_pool.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveInstancePool struct { + name string +} + +func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.InstancePools.GetByInstancePoolName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.InstancePoolId), nil +} + +func (l resolveInstancePool) String() string { + return fmt.Sprintf("instance-pool: %s", l.name) +} diff --git a/bundle/config/variable/resolve_instance_pool_test.go b/bundle/config/variable/resolve_instance_pool_test.go new file mode 100644 index 000000000..cfd1ba015 --- /dev/null +++ b/bundle/config/variable/resolve_instance_pool_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveInstancePool_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockInstancePoolsAPI() + api.EXPECT(). + GetByInstancePoolName(mock.Anything, "instance_pool"). + Return(&compute.InstancePoolAndStats{ + InstancePoolId: "5678", + }, nil) + + ctx := context.Background() + l := resolveInstancePool{name: "instance_pool"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "5678", result) +} + +func TestResolveInstancePool_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockInstancePoolsAPI() + api.EXPECT(). + GetByInstancePoolName(mock.Anything, "instance_pool"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveInstancePool{name: "instance_pool"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveInstancePool_String(t *testing.T) { + l := resolveInstancePool{name: "name"} + assert.Equal(t, "instance-pool: name", l.String()) +} diff --git a/bundle/config/variable/resolve_job.go b/bundle/config/variable/resolve_job.go new file mode 100644 index 000000000..3def64888 --- /dev/null +++ b/bundle/config/variable/resolve_job.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveJob struct { + name string +} + +func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Jobs.GetBySettingsName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.JobId), nil +} + +func (l resolveJob) String() string { + return fmt.Sprintf("job: %s", l.name) +} diff --git a/bundle/config/variable/resolve_job_test.go b/bundle/config/variable/resolve_job_test.go new file mode 100644 index 000000000..523d07957 --- /dev/null +++ b/bundle/config/variable/resolve_job_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveJob_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockJobsAPI() + api.EXPECT(). + GetBySettingsName(mock.Anything, "job"). + Return(&jobs.BaseJob{ + JobId: 5678, + }, nil) + + ctx := context.Background() + l := resolveJob{name: "job"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "5678", result) +} + +func TestResolveJob_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockJobsAPI() + api.EXPECT(). + GetBySettingsName(mock.Anything, "job"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveJob{name: "job"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveJob_String(t *testing.T) { + l := resolveJob{name: "name"} + assert.Equal(t, "job: name", l.String()) +} diff --git a/bundle/config/variable/resolve_metastore.go b/bundle/config/variable/resolve_metastore.go new file mode 100644 index 000000000..958e43787 --- /dev/null +++ b/bundle/config/variable/resolve_metastore.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveMetastore struct { + name string +} + +func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Metastores.GetByName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.MetastoreId), nil +} + +func (l resolveMetastore) String() string { + return fmt.Sprintf("metastore: %s", l.name) +} diff --git a/bundle/config/variable/resolve_metastore_test.go b/bundle/config/variable/resolve_metastore_test.go new file mode 100644 index 000000000..55c4d92d0 --- /dev/null +++ b/bundle/config/variable/resolve_metastore_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveMetastore_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockMetastoresAPI() + api.EXPECT(). + GetByName(mock.Anything, "metastore"). + Return(&catalog.MetastoreInfo{ + MetastoreId: "abcd", + }, nil) + + ctx := context.Background() + l := resolveMetastore{name: "metastore"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "abcd", result) +} + +func TestResolveMetastore_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockMetastoresAPI() + api.EXPECT(). + GetByName(mock.Anything, "metastore"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveMetastore{name: "metastore"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveMetastore_String(t *testing.T) { + l := resolveMetastore{name: "name"} + assert.Equal(t, "metastore: name", l.String()) +} diff --git a/bundle/config/variable/resolve_pipeline.go b/bundle/config/variable/resolve_pipeline.go new file mode 100644 index 000000000..cabc620da --- /dev/null +++ b/bundle/config/variable/resolve_pipeline.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolvePipeline struct { + name string +} + +func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Pipelines.GetByName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.PipelineId), nil +} + +func (l resolvePipeline) String() string { + return fmt.Sprintf("pipeline: %s", l.name) +} diff --git a/bundle/config/variable/resolve_pipeline_test.go b/bundle/config/variable/resolve_pipeline_test.go new file mode 100644 index 000000000..620d76243 --- /dev/null +++ b/bundle/config/variable/resolve_pipeline_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolvePipeline_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockPipelinesAPI() + api.EXPECT(). + GetByName(mock.Anything, "pipeline"). + Return(&pipelines.PipelineStateInfo{ + PipelineId: "abcd", + }, nil) + + ctx := context.Background() + l := resolvePipeline{name: "pipeline"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "abcd", result) +} + +func TestResolvePipeline_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockPipelinesAPI() + api.EXPECT(). + GetByName(mock.Anything, "pipeline"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolvePipeline{name: "pipeline"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolvePipeline_String(t *testing.T) { + l := resolvePipeline{name: "name"} + assert.Equal(t, "pipeline: name", l.String()) +} diff --git a/bundle/config/variable/resolve_query.go b/bundle/config/variable/resolve_query.go new file mode 100644 index 000000000..602ff8deb --- /dev/null +++ b/bundle/config/variable/resolve_query.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveQuery struct { + name string +} + +func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Queries.GetByDisplayName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.Id), nil +} + +func (l resolveQuery) String() string { + return fmt.Sprintf("query: %s", l.name) +} diff --git a/bundle/config/variable/resolve_query_test.go b/bundle/config/variable/resolve_query_test.go new file mode 100644 index 000000000..21516e452 --- /dev/null +++ b/bundle/config/variable/resolve_query_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveQuery_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockQueriesAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "query"). + Return(&sql.ListQueryObjectsResponseQuery{ + Id: "1234", + }, nil) + + ctx := context.Background() + l := resolveQuery{name: "query"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "1234", result) +} + +func TestResolveQuery_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockQueriesAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "query"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveQuery{name: "query"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveQuery_String(t *testing.T) { + l := resolveQuery{name: "name"} + assert.Equal(t, "query: name", l.String()) +} diff --git a/bundle/config/variable/resolve_service_principal.go b/bundle/config/variable/resolve_service_principal.go new file mode 100644 index 000000000..3bea4314b --- /dev/null +++ b/bundle/config/variable/resolve_service_principal.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveServicePrincipal struct { + name string +} + +func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.ServicePrincipals.GetByDisplayName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.ApplicationId), nil +} + +func (l resolveServicePrincipal) String() string { + return fmt.Sprintf("service-principal: %s", l.name) +} diff --git a/bundle/config/variable/resolve_service_principal_test.go b/bundle/config/variable/resolve_service_principal_test.go new file mode 100644 index 000000000..c80f9e4a6 --- /dev/null +++ b/bundle/config/variable/resolve_service_principal_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveServicePrincipal_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockServicePrincipalsAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "service-principal"). + Return(&iam.ServicePrincipal{ + ApplicationId: "5678", + }, nil) + + ctx := context.Background() + l := resolveServicePrincipal{name: "service-principal"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "5678", result) +} + +func TestResolveServicePrincipal_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockServicePrincipalsAPI() + api.EXPECT(). + GetByDisplayName(mock.Anything, "service-principal"). 
+ Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveServicePrincipal{name: "service-principal"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveServicePrincipal_String(t *testing.T) { + l := resolveServicePrincipal{name: "name"} + assert.Equal(t, "service-principal: name", l.String()) +} diff --git a/bundle/config/variable/resolve_warehouse.go b/bundle/config/variable/resolve_warehouse.go new file mode 100644 index 000000000..fbd3663a2 --- /dev/null +++ b/bundle/config/variable/resolve_warehouse.go @@ -0,0 +1,24 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" +) + +type resolveWarehouse struct { + name string +} + +func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + entity, err := w.Warehouses.GetByName(ctx, l.name) + if err != nil { + return "", err + } + return fmt.Sprint(entity.Id), nil +} + +func (l resolveWarehouse) String() string { + return fmt.Sprintf("warehouse: %s", l.name) +} diff --git a/bundle/config/variable/resolve_warehouse_test.go b/bundle/config/variable/resolve_warehouse_test.go new file mode 100644 index 000000000..68e3925bc --- /dev/null +++ b/bundle/config/variable/resolve_warehouse_test.go @@ -0,0 +1,49 @@ +package variable + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveWarehouse_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockWarehousesAPI() + api.EXPECT(). + GetByName(mock.Anything, "warehouse"). + Return(&sql.EndpointInfo{ + Id: "abcd", + }, nil) + + ctx := context.Background() + l := resolveWarehouse{name: "warehouse"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "abcd", result) +} + +func TestResolveWarehouse_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockWarehousesAPI() + api.EXPECT(). + GetByName(mock.Anything, "warehouse"). + Return(nil, &apierr.APIError{StatusCode: 404}) + + ctx := context.Background() + l := resolveWarehouse{name: "warehouse"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.ErrorIs(t, err, apierr.ErrNotFound) +} + +func TestResolveWarehouse_String(t *testing.T) { + l := resolveWarehouse{name: "name"} + assert.Equal(t, "warehouse: name", l.String()) +} From c2e2abcc35bdbc822681ff577728bd3685ac0e9f Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:21:21 +0530 Subject: [PATCH 29/42] Extend "notebook not found" error to warn about missing extension (#1920) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes The full workspace path for a notebook does not contain the notebook's extension. If a user converts that file path to a relative path (like `/Workspace/bundle_root/bar/nb` -> `./bar/nb`), they can be confused as to why the new file path does not work. The changes in this PR nudge them to add the appropriate file extension (e.g., `./bar/nb.py` or `./bar/nb.ipynb`). 
One common way users can end up in this scenario is by using the "view job as YAML" functionality in the Databricks UI. ## Tests Unit test and manually. ``` (.venv) ➜ bundle-playground git:(master) ✗ cli bundle validate Error: notebook ./foo not found. Local notebook references are expected to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb] ``` --- bundle/config/mutator/translate_paths.go | 28 +++++++++- bundle/config/mutator/translate_paths_test.go | 54 +++++++++++++++++++ 2 files changed, 81 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 1e2484c79..5e016d8a1 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -126,7 +126,33 @@ func (t *translateContext) rewritePath( func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { - return "", fmt.Errorf("notebook %s not found", literal) + if filepath.Ext(localFullPath) != notebook.ExtensionNone { + return "", fmt.Errorf("notebook %s not found", literal) + } + + extensions := []string{ + notebook.ExtensionPython, + notebook.ExtensionR, + notebook.ExtensionScala, + notebook.ExtensionSql, + notebook.ExtensionJupyter, + } + + // Check whether a file with a notebook extension already exists. This + // way we can provide a more targeted error message. + for _, ext := range extensions { + literalWithExt := literal + ext + localRelPathWithExt := filepath.ToSlash(localRelPath + ext) + if _, err := fs.Stat(t.b.SyncRoot, localRelPathWithExt); err == nil { + return "", fmt.Errorf(`notebook %s not found. Did you mean %s? +Local notebook references are expected to contain one of the following +file extensions: [%s]`, literal, literalWithExt, strings.Join(extensions, ", ")) + } + } + + // Return a generic error message if no possible matching file is found. + return "", fmt.Errorf(`notebook %s not found. 
Local notebook references are expected +to contain one of the following file extensions: [%s]`, literal, strings.Join(extensions, ", ")) } if err != nil { return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err) diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index a2032f81d..bf6ba15d8 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -2,6 +2,7 @@ package mutator_test import ( "context" + "fmt" "os" "path/filepath" "runtime" @@ -508,6 +509,59 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found") } +func TestPipelineNotebookDoesNotExistErrorWithoutExtension(t *testing.T) { + for _, ext := range []string{ + ".py", + ".r", + ".scala", + ".sql", + ".ipynb", + "", + } { + t.Run("case_"+ext, func(t *testing.T) { + dir := t.TempDir() + + if ext != "" { + touchEmptyFile(t, filepath.Join(dir, "foo"+ext)) + } + + b := &bundle.Bundle{ + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "pipeline": { + PipelineSpec: &pipelines.PipelineSpec{ + Libraries: []pipelines.PipelineLibrary{ + { + Notebook: &pipelines.NotebookLibrary{ + Path: "./foo", + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", []dyn.Location{{File: filepath.Join(dir, "fake.yml")}}) + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + + if ext == "" { + assert.EqualError(t, diags.Error(), `notebook ./foo not found. Local notebook references are expected +to contain one of the following file extensions: [.py, .r, .scala, .sql, .ipynb]`) + } else { + assert.EqualError(t, diags.Error(), fmt.Sprintf(`notebook ./foo not found. Did you mean ./foo%s? +Local notebook references are expected to contain one of the following +file extensions: [.py, .r, .scala, .sql, .ipynb]`, ext)) + } + }) + } +} + func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() From abc2f3c825e1e445ba561d62118025120d07928f Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 21 Nov 2024 16:16:24 +0530 Subject: [PATCH 30/42] Fix `TestAccBundleInitOnMlopsStacks` (#1924) ## Changes The ML production team modified mlops-stack to use `mode: development` for their development target here: https://github.com/databricks/mlops-stacks/pull/174 This PR makes the integration test assertion agnostic of the prefix to make it pass again. 
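For context, a development-mode deployment prefixes resource names with the developer's identity, so the deployed job name looks roughly like the following sketch (the exact prefix format and project name shown here are assumptions):

```
[dev shreyas_goenka] dev-my_mlops_project-batch-inference-job
```

An exact-match assertion on the unprefixed name therefore fails, while a containment check keeps passing.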
## Tests The test passes now --- internal/init_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/init_test.go b/internal/init_test.go index a6241d629..25bfc19da 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -97,7 +97,7 @@ func TestAccBundleInitOnMlopsStacks(t *testing.T) { require.NoError(t, err) job, err := w.Jobs.GetByJobId(context.Background(), batchJobId) assert.NoError(t, err) - assert.Equal(t, fmt.Sprintf("dev-%s-batch-inference-job", projectName), job.Settings.Name) + assert.Contains(t, job.Settings.Name, fmt.Sprintf("dev-%s-batch-inference-job", projectName)) } func TestAccBundleInitHelpers(t *testing.T) { From a3cea07c9e41229cbaea8af3fbb100a20f0d529f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 21 Nov 2024 15:52:14 +0100 Subject: [PATCH 31/42] Support lookup by name of notification destinations (#1922) ## Changes Add support for notification destinations in variable lookups. More information: https://docs.databricks.com/en/admin/workspace-settings/notification-destinations.html Depends on #1921. ## Tests * New unit test * Manually confirmed that the lookup works --- bundle/config/variable/lookup.go | 5 ++ .../resolve_notification_destination.go | 46 +++++++++++ .../resolve_notification_destination_test.go | 82 +++++++++++++++++++ 3 files changed, 133 insertions(+) create mode 100644 bundle/config/variable/resolve_notification_destination.go create mode 100644 bundle/config/variable/resolve_notification_destination_test.go diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index f8cb67198..37e380f18 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -22,6 +22,8 @@ type Lookup struct { Metastore string `json:"metastore,omitempty"` + NotificationDestination string `json:"notification_destination,omitempty"` + Pipeline string `json:"pipeline,omitempty"` Query string `json:"query,omitempty"` @@ -63,6 +65,9 @@ func (l *Lookup) constructResolver() (resolver, error) { if l.Metastore != "" { resolvers = append(resolvers, resolveMetastore{name: l.Metastore}) } + if l.NotificationDestination != "" { + resolvers = append(resolvers, resolveNotificationDestination{name: l.NotificationDestination}) + } if l.Pipeline != "" { resolvers = append(resolvers, resolvePipeline{name: l.Pipeline}) } diff --git a/bundle/config/variable/resolve_notification_destination.go b/bundle/config/variable/resolve_notification_destination.go new file mode 100644 index 000000000..4c4cd892a --- /dev/null +++ b/bundle/config/variable/resolve_notification_destination.go @@ -0,0 +1,46 @@ +package variable + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/settings" +) + +type resolveNotificationDestination struct { + name string +} + +func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databricks.WorkspaceClient) (string, error) { + result, err := w.NotificationDestinations.ListAll(ctx, settings.ListNotificationDestinationsRequest{ + // The default page size for this API is 20. + // We use a higher value to make fewer API calls. + PageSize: 200, + }) + if err != nil { + return "", err + } + + // Collect all notification destinations with the given name. + var entities []settings.ListNotificationDestinationsResult + for _, entity := range result { + if entity.DisplayName == l.name { + entities = append(entities, entity) + } + } + + // Return the ID of the first matching notification destination. 
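+	// Anything other than exactly one match is an error: zero matches means
+	// the destination does not exist, and multiple matches are ambiguous.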
+ switch len(entities) { + case 0: + return "", fmt.Errorf("notification destination named %q does not exist", l.name) + case 1: + return entities[0].Id, nil + default: + return "", fmt.Errorf("there are %d notification destinations named %q", len(entities), l.name) + } +} + +func (l resolveNotificationDestination) String() string { + return fmt.Sprintf("notification-destination: %s", l.name) +} diff --git a/bundle/config/variable/resolve_notification_destination_test.go b/bundle/config/variable/resolve_notification_destination_test.go new file mode 100644 index 000000000..2b8201d15 --- /dev/null +++ b/bundle/config/variable/resolve_notification_destination_test.go @@ -0,0 +1,82 @@ +package variable + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestResolveNotificationDestination_ResolveSuccess(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockNotificationDestinationsAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). + Return([]settings.ListNotificationDestinationsResult{ + {Id: "1234", DisplayName: "destination"}, + }, nil) + + ctx := context.Background() + l := resolveNotificationDestination{name: "destination"} + result, err := l.Resolve(ctx, m.WorkspaceClient) + require.NoError(t, err) + assert.Equal(t, "1234", result) +} + +func TestResolveNotificationDestination_ResolveError(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockNotificationDestinationsAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). + Return(nil, fmt.Errorf("bad")) + + ctx := context.Background() + l := resolveNotificationDestination{name: "destination"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + assert.ErrorContains(t, err, "bad") +} + +func TestResolveNotificationDestination_ResolveNotFound(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockNotificationDestinationsAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). + Return([]settings.ListNotificationDestinationsResult{}, nil) + + ctx := context.Background() + l := resolveNotificationDestination{name: "destination"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.Error(t, err) + assert.ErrorContains(t, err, `notification destination named "destination" does not exist`) +} + +func TestResolveNotificationDestination_ResolveMultiple(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + + api := m.GetMockNotificationDestinationsAPI() + api.EXPECT(). + ListAll(mock.Anything, mock.Anything). 
+ Return([]settings.ListNotificationDestinationsResult{ + {Id: "1234", DisplayName: "destination"}, + {Id: "5678", DisplayName: "destination"}, + }, nil) + + ctx := context.Background() + l := resolveNotificationDestination{name: "destination"} + _, err := l.Resolve(ctx, m.WorkspaceClient) + require.Error(t, err) + assert.ErrorContains(t, err, `there are 2 notification destinations named "destination"`) +} + +func TestResolveNotificationDestination_String(t *testing.T) { + l := resolveNotificationDestination{name: "name"} + assert.Equal(t, "notification-destination: name", l.String()) +} From abfd1713e088503eb869981b4f2f1aa6c9b810a5 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 21 Nov 2024 16:03:13 +0100 Subject: [PATCH 32/42] Skip sync warning if no sync paths are defined (#1926) ## Changes Users can configure the bundle to not synchronize any files with: ```yaml sync: paths: [] ``` If it is explicitly configured as an empty list, the validate command must not warn about not having any files to synchronize. The warning exists to alert users who are unintentionally not synchronizing any files (they might have a `.gitignore` pattern that matches everything). Closes #1663. ## Tests * New unit test. --- bundle/config/validate/files_to_sync.go | 7 ++ bundle/config/validate/files_to_sync_test.go | 105 +++++++++++++++++++ 2 files changed, 112 insertions(+) create mode 100644 bundle/config/validate/files_to_sync_test.go diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go index 7cdad772a..a14278482 100644 --- a/bundle/config/validate/files_to_sync.go +++ b/bundle/config/validate/files_to_sync.go @@ -21,6 +21,12 @@ func (v *filesToSync) Name() string { } func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + // The user may intentionally not synchronize any files. + // In this case, we should not show any warnings. + if len(rb.Config().Sync.Paths) == 0 { + return nil + } + sync, err := files.GetSync(ctx, rb) if err != nil { return diag.FromErr(err) } @@ -31,6 +37,7 @@ func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag. return diag.FromErr(err) } + // If there are files to sync, we don't need to show any warnings. 
if len(fl) != 0 { return nil } diff --git a/bundle/config/validate/files_to_sync_test.go b/bundle/config/validate/files_to_sync_test.go new file mode 100644 index 000000000..2a598fa72 --- /dev/null +++ b/bundle/config/validate/files_to_sync_test.go @@ -0,0 +1,105 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/vfs" + sdkconfig "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestFilesToSync_NoPaths(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{}, + }, + }, + } + + ctx := context.Background() + rb := bundle.ReadOnly(b) + diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync()) + assert.Empty(t, diags) +} + +func setupBundleForFilesToSyncTest(t *testing.T) *bundle.Bundle { + dir := t.TempDir() + + testutil.Touch(t, dir, "file1") + testutil.Touch(t, dir, "file2") + + b := &bundle.Bundle{ + BundleRootPath: dir, + BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + FilePath: "/this/doesnt/matter", + CurrentUser: &config.User{ + User: &iam.User{}, + }, + }, + Sync: config.Sync{ + // Paths are relative to [SyncRootPath]. + Paths: []string{"."}, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &sdkconfig.Config{ + Host: "https://foo.com", + } + + // The initialization logic in [sync.New] performs a check on the destination path. + // Removing this check at initialization time is tbd... + m.GetMockWorkspaceAPI().EXPECT().GetStatusByPath(mock.Anything, "/this/doesnt/matter").Return(&workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeDirectory, + }, nil) + + b.SetWorkpaceClient(m.WorkspaceClient) + return b +} + +func TestFilesToSync_EverythingIgnored(t *testing.T) { + b := setupBundleForFilesToSyncTest(t) + + // Ignore all files. + testutil.WriteFile(t, "*\n.*\n", b.BundleRootPath, ".gitignore") + + ctx := context.Background() + rb := bundle.ReadOnly(b) + diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync()) + require.Equal(t, 1, len(diags)) + assert.Equal(t, diag.Warning, diags[0].Severity) + assert.Equal(t, "There are no files to sync, please check your .gitignore", diags[0].Summary) +} + +func TestFilesToSync_EverythingExcluded(t *testing.T) { + b := setupBundleForFilesToSyncTest(t) + + // Exclude all files. 
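+	// Unlike the .gitignore-based test above, this exercises the sync.exclude
+	// configuration as the mechanism that suppresses all files.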
+ b.Config.Sync.Exclude = []string{"*"} + + ctx := context.Background() + rb := bundle.ReadOnly(b) + diags := bundle.ApplyReadOnly(ctx, rb, FilesToSync()) + require.Equal(t, 1, len(diags)) + assert.Equal(t, diag.Warning, diags[0].Severity) + assert.Equal(t, "There are no files to sync, please check your .gitignore and sync.exclude configuration", diags[0].Summary) +} From 490dd058aa6bceee65b1778a50e3ef42ad9d11dd Mon Sep 17 00:00:00 2001 From: Ilya Kuznetsov Date: Fri, 22 Nov 2024 15:44:33 +0100 Subject: [PATCH 33/42] Extended message for warning when source-linked mode is used outside of the workspace (#1929) ## Changes Added the path and locations to the warning that is displayed when source-linked mode is used outside of the workspace --- bundle/config/mutator/apply_presets.go | 14 +++++++++++++- bundle/config/mutator/apply_presets_test.go | 4 ++++ 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 9cec704e6..381703756 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -225,9 +225,21 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos if config.IsExplicitlyEnabled((b.Config.Presets.SourceLinkedDeployment)) { isDatabricksWorkspace := dbr.RunsOnRuntime(ctx) && strings.HasPrefix(b.SyncRootPath, "/Workspace/") if !isDatabricksWorkspace { + target := b.Config.Bundle.Target + path := dyn.NewPath(dyn.Key("targets"), dyn.Key(target), dyn.Key("presets"), dyn.Key("source_linked_deployment")) + diags = diags.Append( + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "source-linked deployment is available only in the Databricks Workspace", + Paths: []dyn.Path{ + path, + }, + Locations: b.Config.GetLocations(path[2:].String()), + }, + ) + disabled := false b.Config.Presets.SourceLinkedDeployment = &disabled - diags = diags.Extend(diag.Warningf("source-linked deployment is available only in the Databricks Workspace")) } } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index f11a45d63..497ef051a 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -9,7 +9,9 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/cli/libs/dbr" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -435,6 +437,7 @@ func TestApplyPresetsSourceLinkedDeployment(t *testing.T) { }, } + bundletest.SetLocation(b, "presets.source_linked_deployment", []dyn.Location{{File: "databricks.yml"}}) diags := bundle.Apply(tt.ctx, b, mutator.ApplyPresets()) if diags.HasError() { t.Fatalf("unexpected error: %v", diags) } if tt.expectedWarning != "" { require.Equal(t, tt.expectedWarning, diags[0].Summary) + require.NotEmpty(t, diags[0].Locations) } require.Equal(t, tt.expectedValue, b.Config.Presets.SourceLinkedDeployment) From b323703c1b1213bda89ba5f63b3e69073623d2bd Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Fri, 22 Nov 2024 21:18:09 +0530 Subject: [PATCH 34/42] Add validation for single node clusters 
(#1909) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This PR adds a warning that validates the single node cluster configuration for interactive, job, job-task, and pipeline clusters. Note: We skip the validation if a cluster policy is configured because the policy is likely to configure `spark_conf` / `custom_tags` itself. Note: Terraform originally only had validation for interactive, job, and job-task clusters. The validation this PR adds for pipeline clusters is new. This PR follows the same logic as we used to have in Terraform. The validation was removed from Terraform because we had no way to demote the error to a warning: https://github.com/databricks/terraform-provider-databricks/pull/4222 ### Background Single-node clusters require `spark_conf` and `custom_tags` to be correctly set in the cluster definition for them to function optimally. The cluster will be created even if incorrectly configured, but its performance will not be great. For example, if both `spark_conf` and `custom_tags` are not set and `num_workers` is 0, then only the driver process will be launched on the cluster compute instance, thus leading to sub-optimal utilization of available compute resources and no parallelization across worker processes when processing a Spark query. ### Issue This PR addresses some issues reported in https://github.com/databricks/cli/issues/1546 ## Tests Unit tests and manually. Example output of the warning: ``` ➜ bundle-playground git:(master) ✗ cli bundle validate Warning: Single node cluster is not correctly configured at resources.pipelines.bar.clusters[0] in databricks.yml:29:11 num_workers should be 0 only for single-node clusters. To create a valid single node cluster please ensure that the following properties are correctly set in the cluster specification: spark_conf: spark.databricks.cluster.profile: singleNode spark.master: local[*] custom_tags: ResourceClass: SingleNode Name: foobar Target: default Workspace: User: shreyas.goenka@databricks.com Path: /Workspace/Users/shreyas.goenka@databricks.com/.bundle/foobar/default Found 1 warning ``` --- bundle/config/validate/single_node_cluster.go | 137 +++++ .../validate/single_node_cluster_test.go | 566 ++++++++++++++++++ bundle/config/validate/validate.go | 1 + 3 files changed, 704 insertions(+) create mode 100644 bundle/config/validate/single_node_cluster.go create mode 100644 bundle/config/validate/single_node_cluster_test.go diff --git a/bundle/config/validate/single_node_cluster.go b/bundle/config/validate/single_node_cluster.go new file mode 100644 index 000000000..7c159f61a --- /dev/null +++ b/bundle/config/validate/single_node_cluster.go @@ -0,0 +1,137 @@ +package validate + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +// Validates that any single node clusters defined in the bundle are correctly configured. +func SingleNodeCluster() bundle.ReadOnlyMutator { + return &singleNodeCluster{} } + +type singleNodeCluster struct{} + +func (m *singleNodeCluster) Name() string { + return "validate:SingleNodeCluster" } + +const singleNodeWarningDetail = `num_workers should be 0 only for single-node clusters. 
To create a +valid single node cluster please ensure that the following properties +are correctly set in the cluster specification: + + spark_conf: + spark.databricks.cluster.profile: singleNode + spark.master: local[*] + + custom_tags: + ResourceClass: SingleNode + ` + +const singleNodeWarningSummary = `Single node cluster is not correctly configured` + +func showSingleNodeClusterWarning(ctx context.Context, v dyn.Value) bool { + // Check if the user has explicitly set the num_workers to 0. Skip the warning + // if that's not the case. + numWorkers, ok := v.Get("num_workers").AsInt() + if !ok || numWorkers > 0 { + return false + } + + // Convenient type that contains the common fields from compute.ClusterSpec and + // pipelines.PipelineCluster that we are interested in. + type ClusterConf struct { + SparkConf map[string]string `json:"spark_conf"` + CustomTags map[string]string `json:"custom_tags"` + PolicyId string `json:"policy_id"` + } + + conf := &ClusterConf{} + err := convert.ToTyped(conf, v) + if err != nil { + return false + } + + // If the policy id is set, we don't want to show the warning. This is because + // the user might have configured `spark_conf` and `custom_tags` correctly + // in their cluster policy. + if conf.PolicyId != "" { + return false + } + + profile, ok := conf.SparkConf["spark.databricks.cluster.profile"] + if !ok { + log.Debugf(ctx, "spark_conf spark.databricks.cluster.profile not found in single-node cluster spec") + return true + } + if profile != "singleNode" { + log.Debugf(ctx, "spark_conf spark.databricks.cluster.profile is not singleNode in single-node cluster spec: %s", profile) + return true + } + + master, ok := conf.SparkConf["spark.master"] + if !ok { + log.Debugf(ctx, "spark_conf spark.master not found in single-node cluster spec") + return true + } + if !strings.HasPrefix(master, "local") { + log.Debugf(ctx, "spark_conf spark.master does not start with local in single-node cluster spec: %s", master) + return true + } + + resourceClass, ok := conf.CustomTags["ResourceClass"] + if !ok { + log.Debugf(ctx, "custom_tag ResourceClass not found in single-node cluster spec") + return true + } + if resourceClass != "SingleNode" { + log.Debugf(ctx, "custom_tag ResourceClass is not SingleNode in single-node cluster spec: %s", resourceClass) + return true + } + + return false +} + +func (m *singleNodeCluster) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + diags := diag.Diagnostics{} + + patterns := []dyn.Pattern{ + // Interactive clusters + dyn.NewPattern(dyn.Key("resources"), dyn.Key("clusters"), dyn.AnyKey()), + // Job clusters + dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("job_clusters"), dyn.AnyIndex(), dyn.Key("new_cluster")), + // Job task clusters + dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("tasks"), dyn.AnyIndex(), dyn.Key("new_cluster")), + // Job for each task clusters + dyn.NewPattern(dyn.Key("resources"), dyn.Key("jobs"), dyn.AnyKey(), dyn.Key("tasks"), dyn.AnyIndex(), dyn.Key("for_each_task"), dyn.Key("task"), dyn.Key("new_cluster")), + // Pipeline clusters + dyn.NewPattern(dyn.Key("resources"), dyn.Key("pipelines"), dyn.AnyKey(), dyn.Key("clusters"), dyn.AnyIndex()), + } + + for _, p := range patterns { + _, err := dyn.MapByPattern(rb.Config().Value(), p, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + warning := diag.Diagnostic{ + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: 
v.Locations(), + Paths: []dyn.Path{p}, + } + + if showSingleNodeClusterWarning(ctx, v) { + diags = append(diags, warning) + } + return v, nil + }) + if err != nil { + log.Debugf(ctx, "Error while applying single node cluster validation: %s", err) + } + } + return diags +} diff --git a/bundle/config/validate/single_node_cluster_test.go b/bundle/config/validate/single_node_cluster_test.go new file mode 100644 index 000000000..18771cc00 --- /dev/null +++ b/bundle/config/validate/single_node_cluster_test.go @@ -0,0 +1,566 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" +) + +func failCases() []struct { + name string + sparkConf map[string]string + customTags map[string]string +} { + return []struct { + name string + sparkConf map[string]string + customTags map[string]string + }{ + { + name: "no tags or conf", + }, + { + name: "no tags", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + "spark.master": "local[*]", + }, + }, + { + name: "no conf", + customTags: map[string]string{"ResourceClass": "SingleNode"}, + }, + { + name: "invalid spark cluster profile", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "invalid", + "spark.master": "local[*]", + }, + customTags: map[string]string{"ResourceClass": "SingleNode"}, + }, + { + name: "invalid spark.master", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + "spark.master": "invalid", + }, + customTags: map[string]string{"ResourceClass": "SingleNode"}, + }, + { + name: "invalid tags", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + "spark.master": "local[*]", + }, + customTags: map[string]string{"ResourceClass": "invalid"}, + }, + { + name: "missing ResourceClass tag", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + "spark.master": "local[*]", + }, + customTags: map[string]string{"what": "ever"}, + }, + { + name: "missing spark.master", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + }, + customTags: map[string]string{"ResourceClass": "SingleNode"}, + }, + { + name: "missing spark.databricks.cluster.profile", + sparkConf: map[string]string{ + "spark.master": "local[*]", + }, + customTags: map[string]string{"ResourceClass": "SingleNode"}, + }, + } +} + +func TestValidateSingleNodeClusterFailForInteractiveClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range failCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Clusters: map[string]*resources.Cluster{ + "foo": { + ClusterSpec: &compute.ClusterSpec{ + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.clusters.foo", []dyn.Location{{File: "a.yml", Line: 1, Column: 1}}) + + // We can't set num_workers to 0 explicitly in the typed configuration. + // Do it on the dyn.Value directly. 
+ bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(0)) + }) + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: []dyn.Location{{File: "a.yml", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.NewPath(dyn.Key("resources"), dyn.Key("clusters"), dyn.Key("foo"))}, + }, + }, diags) + }) + } +} + +func TestValidateSingleNodeClusterFailForJobClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range failCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.jobs.foo.job_clusters[0].new_cluster", []dyn.Location{{File: "b.yml", Line: 1, Column: 1}}) + + // We can't set num_workers to 0 explicitly in the typed configuration. + // Do it on the dyn.Value directly. + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(0)) + }) + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: []dyn.Location{{File: "b.yml", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.job_clusters[0].new_cluster")}, + }, + }, diags) + + }) + } +} + +func TestValidateSingleNodeClusterFailForJobTaskClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range failCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.jobs.foo.tasks[0].new_cluster", []dyn.Location{{File: "c.yml", Line: 1, Column: 1}}) + + // We can't set num_workers to 0 explicitly in the typed configuration. + // Do it on the dyn.Value directly. 
+ bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(0)) + }) + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: []dyn.Location{{File: "c.yml", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.tasks[0].new_cluster")}, + }, + }, diags) + }) + } +} + +func TestValidateSingleNodeClusterFailForPipelineClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range failCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.pipelines.foo.clusters[0]", []dyn.Location{{File: "d.yml", Line: 1, Column: 1}}) + + // We can't set num_workers to 0 explicitly in the typed configuration. + // Do it on the dyn.Value directly. + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(0)) + }) + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: []dyn.Location{{File: "d.yml", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.pipelines.foo.clusters[0]")}, + }, + }, diags) + }) + } +} + +func TestValidateSingleNodeClusterFailForJobForEachTaskCluster(t *testing.T) { + ctx := context.Background() + + for _, tc := range failCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + NewCluster: &compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster", []dyn.Location{{File: "e.yml", Line: 1, Column: 1}}) + + // We can't set num_workers to 0 explicitly in the typed configuration. + // Do it on the dyn.Value directly. 
+ bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster.num_workers", dyn.V(0)) + }) + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Equal(t, diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: singleNodeWarningSummary, + Detail: singleNodeWarningDetail, + Locations: []dyn.Location{{File: "e.yml", Line: 1, Column: 1}}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.foo.tasks[0].for_each_task.task.new_cluster")}, + }, + }, diags) + }) + } +} + +func passCases() []struct { + name string + numWorkers *int + sparkConf map[string]string + customTags map[string]string + policyId string +} { + zero := 0 + one := 1 + + return []struct { + name string + numWorkers *int + sparkConf map[string]string + customTags map[string]string + policyId string + }{ + { + name: "single node cluster", + sparkConf: map[string]string{ + "spark.databricks.cluster.profile": "singleNode", + "spark.master": "local[*]", + }, + customTags: map[string]string{ + "ResourceClass": "SingleNode", + }, + numWorkers: &zero, + }, + { + name: "num workers is not zero", + numWorkers: &one, + }, + { + name: "num workers is not set", + }, + { + name: "policy id is not empty", + policyId: "policy-abc", + numWorkers: &zero, + }, + } +} + +func TestValidateSingleNodeClusterPassInteractiveClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range passCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Clusters: map[string]*resources.Cluster{ + "foo": { + ClusterSpec: &compute.ClusterSpec{ + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + PolicyId: tc.policyId, + }, + }, + }, + }, + }, + } + + if tc.numWorkers != nil { + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.clusters.foo.num_workers", dyn.V(*tc.numWorkers)) + }) + } + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Empty(t, diags) + }) + } +} + +func TestValidateSingleNodeClusterPassJobClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range passCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + PolicyId: tc.policyId, + }, + }, + }, + }, + }, + }, + }, + }, + } + + if tc.numWorkers != nil { + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.job_clusters[0].new_cluster.num_workers", dyn.V(*tc.numWorkers)) + }) + } + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Empty(t, diags) + }) + } +} + +func TestValidateSingleNodeClusterPassJobTaskClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range passCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + PolicyId: tc.policyId, + }, + }, + }, + }, + }, + }, + }, + }, + } 
+ + if tc.numWorkers != nil { + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.tasks[0].new_cluster.num_workers", dyn.V(*tc.numWorkers)) + }) + } + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Empty(t, diags) + }) + } +} + +func TestValidateSingleNodeClusterPassPipelineClusters(t *testing.T) { + ctx := context.Background() + + for _, tc := range passCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + PolicyId: tc.policyId, + }, + }, + }, + }, + }, + }, + }, + } + + if tc.numWorkers != nil { + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.pipelines.foo.clusters[0].num_workers", dyn.V(*tc.numWorkers)) + }) + } + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Empty(t, diags) + }) + } +} + +func TestValidateSingleNodeClusterPassJobForEachTaskCluster(t *testing.T) { + ctx := context.Background() + + for _, tc := range passCases() { + t.Run(tc.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + NewCluster: &compute.ClusterSpec{ + ClusterName: "my_cluster", + SparkConf: tc.sparkConf, + CustomTags: tc.customTags, + PolicyId: tc.policyId, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + if tc.numWorkers != nil { + bundletest.Mutate(t, b, func(v dyn.Value) (dyn.Value, error) { + return dyn.Set(v, "resources.jobs.foo.tasks[0].for_each_task.task.new_cluster.num_workers", dyn.V(*tc.numWorkers)) + }) + } + + diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), SingleNodeCluster()) + assert.Empty(t, diags) + }) + } +} diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go index 440477e65..eb4c3c3cd 100644 --- a/bundle/config/validate/validate.go +++ b/bundle/config/validate/validate.go @@ -36,6 +36,7 @@ func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics ValidateSyncPatterns(), JobTaskClusterSpec(), ValidateFolderPermissions(), + SingleNodeCluster(), )) } From 4b069bb6e1ec3f349b86fe4e97995f2f2b64a328 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 25 Nov 2024 13:46:20 +0000 Subject: [PATCH 35/42] Bump golang.org/x/term from 0.25.0 to 0.26.0 (#1907) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.25.0 to 0.26.0.
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.25.0&new-version=0.26.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index 9ae5fde0d..73b9984d3 100644 --- a/go.mod +++ b/go.mod @@ -27,7 +27,7 @@ require ( golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 golang.org/x/sync v0.9.0 - golang.org/x/term v0.25.0 + golang.org/x/term v0.26.0 golang.org/x/text v0.20.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -64,7 +64,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.24.0 // indirect golang.org/x/net v0.26.0 // indirect - golang.org/x/sys v0.26.0 // indirect + golang.org/x/sys v0.27.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect diff --git a/go.sum b/go.sum index 2bfcfb2fa..928827d9d 100644 --- a/go.sum +++ b/go.sum @@ -212,10 +212,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.26.0 h1:KHjCJyddX0LoSTb3J+vWpupP9p0oznkqVk/IfjymZbo= -golang.org/x/sys v0.26.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.25.0 h1:WtHI/ltw4NvSUig5KARz9h521QvRC8RmF/cuYqifU24= -golang.org/x/term v0.25.0/go.mod h1:RPyXicDX+6vLxogjjRxjgD2TKtmAO6NZBsBRfrOLu7M= +golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s= +golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU= +golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug= From 026c5555b2757d9575053ae02ecebb52a8d78a51 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 16:52:43 +0100 Subject: [PATCH 36/42] Bump github.com/Masterminds/semver/v3 from 3.3.0 to 3.3.1 (#1930) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/Masterminds/semver/v3](https://github.com/Masterminds/semver) from 3.3.0 to 3.3.1.
Release notes

Sourced from github.com/Masterminds/semver/v3's releases.

v3.3.1

What's Changed

Full Changelog: https://github.com/Masterminds/semver/compare/v3.3.0...v3.3.1

Changelog

Sourced from github.com/Masterminds/semver/v3's changelog.

Changelog

Commits
  • 1558ca3 Merge pull request #253 from mattfarina/fix-bad-versions
  • 252dd61 Fix for allowing some version that were invalid
  • See full diff in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/Masterminds/semver/v3&package-manager=go_modules&previous-version=3.3.0&new-version=3.3.1)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 73b9984d3..ab5194500 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.23 toolchain go1.23.2 require ( - github.com/Masterminds/semver/v3 v3.3.0 // MIT + github.com/Masterminds/semver/v3 v3.3.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0 github.com/fatih/color v1.18.0 // MIT diff --git a/go.sum b/go.sum index 928827d9d..6c318ac82 100644 --- a/go.sum +++ b/go.sum @@ -8,8 +8,8 @@ cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1h dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Masterminds/semver/v3 v3.3.0 h1:B8LGeaivUe71a5qox1ICM/JLl0NqZSW5CHyL+hmvYS0= -github.com/Masterminds/semver/v3 v3.3.0/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= +github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= +github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= From 85c0d2d3ee7e81070e4af8f8f79b59cacaeb6fb9 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 26 Nov 2024 17:12:11 +0000 Subject: [PATCH 37/42] Bump github.com/stretchr/testify from 1.9.0 to 1.10.0 (#1932) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Bumps [github.com/stretchr/testify](https://github.com/stretchr/testify) from 1.9.0 to 1.10.0.
Release notes

Sourced from github.com/stretchr/testify's releases.

v1.10.0

What's Changed

Functional Changes

Fixes

Documentation, Build & CI

New Contributors

... (truncated)

Commits
  • 89cbdd9 Merge pull request #1626 from arjun-1/fix-functional-options-diff-indirect-calls
  • 07bac60 Merge pull request #1667 from sikehish/flaky
  • 716de8d Increase timeouts in Test_Mock_Called_blocks to reduce flakiness in CI
  • 118fb83 NotSame should fail if args are not pointers #1661 (#1664)
  • 7d99b2b attempt 2
  • 05f87c0 more similar
  • ea7129e better fmt
  • a1b9c9e Merge pull request #1663 from ybrustin/master
  • 8302de9 Merge branch 'master' into master
  • 89352f7 Merge pull request #1518 from hendrywiranto/adjust-readme-remove-v2
  • Additional commits viewable in compare view

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/stretchr/testify&package-manager=go_modules&previous-version=1.9.0&new-version=1.10.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index ab5194500..353313131 100644 --- a/go.mod +++ b/go.mod @@ -22,7 +22,7 @@ require ( github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause - github.com/stretchr/testify v1.9.0 // MIT + github.com/stretchr/testify v1.10.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.22.0 golang.org/x/oauth2 v0.24.0 diff --git a/go.sum b/go.sum index 6c318ac82..312a4d934 100644 --- a/go.sum +++ b/go.sum @@ -156,8 +156,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/ github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= github.com/zclconf/go-cty v1.15.0 h1:tTCRWxsexYUmtt/wVxgDClUe+uQusuI443uL6e+5sXQ= From fae1b6742dfc173ac27847e99e40e7f7171eb8be Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 27 Nov 2024 12:51:08 +0100 Subject: [PATCH 38/42] Update target references to use `${bundle.target}` (#1935) ## Changes The built-in template contains a reference to `${bundle.environment}`. This property has been deprecated in favor of `${bundle.target}` a long time ago (#670), so we should no longer emit it. The environment field will continue to be usable until we cut a new major version in some far away future. 
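For illustration, here is a minimal sketch of the rename in a bundle configuration (a hypothetical `databricks.yml`; the target and job names are invented):

```yaml
bundle:
  name: my_bundle

targets:
  dev:
    default: true

resources:
  jobs:
    my_job:
      # Deprecated spelling: name: "[${bundle.environment}] My Job"
      # Preferred spelling below; resolves to "[dev] My Job" for the dev target.
      name: "[${bundle.target}] My Job"
```

Both variables resolve to the same value; only the `${bundle.environment}` spelling is deprecated.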
## Tests * Unit tests * The test `TestInterpolationWithTarget` still covers correct interpolation of `${bundle.environment}` --- bundle/tests/python_wheel/python_wheel/bundle.yml | 2 +- bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml | 2 +- bundle/tests/python_wheel/python_wheel_multiple/bundle.yml | 2 +- bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml | 2 +- .../python_wheel/python_wheel_no_artifact_no_setup/bundle.yml | 2 +- .../python_wheel/python_wheel_no_artifact_notebook/bundle.yml | 2 +- bundle/tests/python_wheel/python_wheel_no_build/bundle.yml | 2 +- .../resources/{{.project_name}}.pipeline.yml.tmpl | 2 +- 8 files changed, 8 insertions(+), 8 deletions(-) diff --git a/bundle/tests/python_wheel/python_wheel/bundle.yml b/bundle/tests/python_wheel/python_wheel/bundle.yml index c82ff83f7..017fe1c43 100644 --- a/bundle/tests/python_wheel/python_wheel/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel/bundle.yml @@ -10,7 +10,7 @@ artifacts: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-132531-5opeqon1" diff --git a/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml b/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml index 07f4957bb..fe2723aa6 100644 --- a/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml @@ -4,7 +4,7 @@ bundle: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-132531-5opeqon1" diff --git a/bundle/tests/python_wheel/python_wheel_multiple/bundle.yml b/bundle/tests/python_wheel/python_wheel_multiple/bundle.yml index 6964c58a4..770110416 100644 --- a/bundle/tests/python_wheel/python_wheel_multiple/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_multiple/bundle.yml @@ -14,7 +14,7 @@ artifacts: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-132531-5opeqon1" diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml index 88cb47be5..d1d0a4dbd 100644 --- a/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml @@ -4,7 +4,7 @@ bundle: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-aaaaa-bbbbbb" diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml index d03084303..948bf1558 100644 --- a/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml @@ -7,7 +7,7 @@ workspace: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-aaaaa-bbbbbb" diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact_notebook/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact_notebook/bundle.yml index 93e4e6918..77fd6ad99 100644 --- a/bundle/tests/python_wheel/python_wheel_no_artifact_notebook/bundle.yml +++ 
b/bundle/tests/python_wheel/python_wheel_no_artifact_notebook/bundle.yml @@ -4,7 +4,7 @@ bundle: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-aaaaa-bbbbbb" diff --git a/bundle/tests/python_wheel/python_wheel_no_build/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_build/bundle.yml index 91b8b1556..e10e3993d 100644 --- a/bundle/tests/python_wheel/python_wheel_no_build/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_no_build/bundle.yml @@ -4,7 +4,7 @@ bundle: resources: jobs: test_job: - name: "[${bundle.environment}] My Wheel Job" + name: "[${bundle.target}] My Wheel Job" tasks: - task_key: TestTask existing_cluster_id: "0717-132531-5opeqon1" diff --git a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl index 1c6b8607e..50f11fe2c 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/resources/{{.project_name}}.pipeline.yml.tmpl @@ -9,7 +9,7 @@ resources: {{- else}} catalog: {{default_catalog}} {{- end}} - target: {{.project_name}}_${bundle.environment} + target: {{.project_name}}_${bundle.target} libraries: - notebook: path: ../src/dlt_pipeline.ipynb From e57cbf1273a43fb6042e4a33634f1ffd10c08dea Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Wed, 27 Nov 2024 13:14:39 +0100 Subject: [PATCH 39/42] Remove unused field: Repository.real (#1936) --- libs/git/repository.go | 7 ------- 1 file changed, 7 deletions(-) diff --git a/libs/git/repository.go b/libs/git/repository.go index 0bbd57865..f0e9e1eb2 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -19,10 +19,6 @@ var GitDirectoryName = ".git" // Repository represents a Git repository or a directory // that could later be initialized as Git repository. type Repository struct { - // real indicates if this is a real repository or a non-Git - // directory where we process .gitignore files. - real bool - // rootDir is the path to the root of the repository checkout. // This can be either the main repository checkout or a worktree checkout. // For more information about worktrees, see: https://git-scm.com/docs/git-worktree#_description. @@ -209,7 +205,6 @@ func (r *Repository) Ignore(relPath string) (bool, error) { } func NewRepository(path vfs.Path) (*Repository, error) { - real := true rootDir, err := vfs.FindLeafInTree(path, GitDirectoryName) if err != nil { if !errors.Is(err, fs.ErrNotExist) { @@ -217,7 +212,6 @@ func NewRepository(path vfs.Path) (*Repository, error) { } // Cannot find `.git` directory. // Treat the specified path as a potential repository root checkout. 
- real = false rootDir = path } @@ -229,7 +223,6 @@ func NewRepository(path vfs.Path) (*Repository, error) { } repo := &Repository{ - real: real, rootDir: rootDir, gitDir: gitDir, gitCommonDir: gitCommonDir, From 6fc2093a22d4e366061e593d14ce78c7b6c79447 Mon Sep 17 00:00:00 2001 From: Denis Bilenko Date: Thu, 28 Nov 2024 09:52:21 +0100 Subject: [PATCH 40/42] Remove unused method GitRepository (#1941) --- bundle/bundle.go | 10 ---------- 1 file changed, 10 deletions(-) diff --git a/bundle/bundle.go b/bundle/bundle.go index 856255685..46710538a 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -17,7 +17,6 @@ import ( "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/tags" @@ -223,15 +222,6 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { return append(b.Config.Sync.Include, filepath.ToSlash(filepath.Join(internalDirRel, "*.*"))), nil } -func (b *Bundle) GitRepository() (*git.Repository, error) { - _, err := vfs.FindLeafInTree(b.BundleRoot, ".git") - if err != nil { - return nil, fmt.Errorf("unable to locate repository root: %w", err) - } - - return git.NewRepository(b.BundleRoot) -} - // AuthEnv returns a map with environment variables and their values // derived from the workspace client configuration that was resolved // in the context of this bundle. From 8053e9c4e48501e1b5476f2cbb329a14a6e0b897 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 28 Nov 2024 13:27:24 +0100 Subject: [PATCH 41/42] Fix segfault in bundle summary command (#1937) ## Changes This PR introduces the use of a new `IsNil` method. It ensures we filter out all improperly defined resources in the `bundle summary` command. This includes deleted resources or resources with incorrect configuration, such as defining only the key of the resource and nothing else. Fixes #1919, #1913 ## Tests Added a regression unit test case. --- bundle/config/resources.go | 6 +++++ bundle/config/resources/clusters.go | 4 +++ bundle/config/resources/dashboard.go | 4 +++ bundle/config/resources/job.go | 4 +++ bundle/config/resources/mlflow_experiment.go | 4 +++ bundle/config/resources/mlflow_model.go | 4 +++ .../resources/model_serving_endpoint.go | 4 +++ bundle/config/resources/pipeline.go | 4 +++ bundle/config/resources/quality_monitor.go | 4 +++ bundle/config/resources/registered_model.go | 4 +++ bundle/config/resources/schema.go | 4 +++ bundle/render/render_text_output_test.go | 4 +++ bundle/resources/completion_test.go | 22 ++++++++++++---- bundle/resources/lookup_test.go | 25 ++++++++++++++----- 14 files changed, 86 insertions(+), 11 deletions(-) diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 0affb6ef0..2886e3571 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -41,6 +41,9 @@ type ConfigResource interface { // InitializeURL initializes the URL field of the resource. InitializeURL(baseURL url.URL) + + // IsNil returns true if the resource is nil, for example, when it was removed from the bundle. + IsNil() bool } // ResourceGroup represents a group of resources of the same type. 
@@ -57,6 +60,9 @@ func collectResourceMap[T ConfigResource]( ) ResourceGroup { resources := make(map[string]ConfigResource) for key, resource := range input { + if resource.IsNil() { + continue + } resources[key] = resource } return ResourceGroup{ diff --git a/bundle/config/resources/clusters.go b/bundle/config/resources/clusters.go index eb0247c6e..ba991e865 100644 --- a/bundle/config/resources/clusters.go +++ b/bundle/config/resources/clusters.go @@ -56,3 +56,7 @@ func (s *Cluster) GetName() string { func (s *Cluster) GetURL() string { return s.URL } + +func (s *Cluster) IsNil() bool { + return s.ClusterSpec == nil +} diff --git a/bundle/config/resources/dashboard.go b/bundle/config/resources/dashboard.go index 724b03393..5c58a2f2b 100644 --- a/bundle/config/resources/dashboard.go +++ b/bundle/config/resources/dashboard.go @@ -79,3 +79,7 @@ func (r *Dashboard) GetName() string { func (r *Dashboard) GetURL() string { return r.URL } + +func (r *Dashboard) IsNil() bool { + return r.Dashboard == nil +} diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index 98db1ec5d..0aa41b2e8 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -63,3 +63,7 @@ func (j *Job) GetName() string { func (j *Job) GetURL() string { return j.URL } + +func (j *Job) IsNil() bool { + return j.JobSettings == nil +} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index a5871468f..5d179ec0f 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -58,3 +58,7 @@ func (s *MlflowExperiment) GetName() string { func (s *MlflowExperiment) GetURL() string { return s.URL } + +func (s *MlflowExperiment) IsNil() bool { + return s.Experiment == nil +} diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 9ead254d8..72376f45d 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -58,3 +58,7 @@ func (s *MlflowModel) GetName() string { func (s *MlflowModel) GetURL() string { return s.URL } + +func (s *MlflowModel) IsNil() bool { + return s.Model == nil +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index 7f3ae00c8..a3c472b3f 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -66,3 +66,7 @@ func (s *ModelServingEndpoint) GetName() string { func (s *ModelServingEndpoint) GetURL() string { return s.URL } + +func (s *ModelServingEndpoint) IsNil() bool { + return s.CreateServingEndpoint == nil +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index b3311d8e2..eaa4c5368 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -58,3 +58,7 @@ func (p *Pipeline) GetName() string { func (s *Pipeline) GetURL() string { return s.URL } + +func (s *Pipeline) IsNil() bool { + return s.PipelineSpec == nil +} diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 30ec4f918..b1d7e08a5 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -62,3 +62,7 @@ func (s *QualityMonitor) GetName() string { func (s *QualityMonitor) GetURL() string { return s.URL } + +func (s *QualityMonitor) IsNil() bool { + return s.CreateMonitor == nil +} diff --git 
a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index c44526d09..8513a79ae 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -68,3 +68,7 @@ func (s *RegisteredModel) GetName() string { func (s *RegisteredModel) GetURL() string { return s.URL } + +func (s *RegisteredModel) IsNil() bool { + return s.CreateRegisteredModelRequest == nil +} diff --git a/bundle/config/resources/schema.go b/bundle/config/resources/schema.go index a9f905cf1..8eadd7e46 100644 --- a/bundle/config/resources/schema.go +++ b/bundle/config/resources/schema.go @@ -56,3 +56,7 @@ func (s *Schema) UnmarshalJSON(b []byte) error { func (s Schema) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *Schema) IsNil() bool { + return s.CreateSchema == nil +} diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go index cd9e7723b..135d79dae 100644 --- a/bundle/render/render_text_output_test.go +++ b/bundle/render/render_text_output_test.go @@ -519,6 +519,10 @@ func TestRenderSummary(t *testing.T) { URL: "https://url2", JobSettings: &jobs.JobSettings{Name: "job2-name"}, }, + "job3": { + ID: "3", + URL: "https://url3", // This emulates deleted job + }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline2": { diff --git a/bundle/resources/completion_test.go b/bundle/resources/completion_test.go index 2f7942aae..80412b6f1 100644 --- a/bundle/resources/completion_test.go +++ b/bundle/resources/completion_test.go @@ -6,6 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/assert" ) @@ -14,11 +16,17 @@ func TestCompletions_SkipDuplicates(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "foo": {}, - "bar": {}, + "foo": { + JobSettings: &jobs.JobSettings{}, + }, + "bar": { + JobSettings: &jobs.JobSettings{}, + }, }, Pipelines: map[string]*resources.Pipeline{ - "foo": {}, + "foo": { + PipelineSpec: &pipelines.PipelineSpec{}, + }, }, }, }, @@ -36,10 +44,14 @@ func TestCompletions_Filter(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "foo": {}, + "foo": { + JobSettings: &jobs.JobSettings{}, + }, }, Pipelines: map[string]*resources.Pipeline{ - "bar": {}, + "bar": { + PipelineSpec: &pipelines.PipelineSpec{}, + }, }, }, }, diff --git a/bundle/resources/lookup_test.go b/bundle/resources/lookup_test.go index b2eaafd1a..0ea5af7a2 100644 --- a/bundle/resources/lookup_test.go +++ b/bundle/resources/lookup_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -28,8 +29,12 @@ func TestLookup_NotFound(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "foo": {}, - "bar": {}, + "foo": { + JobSettings: &jobs.JobSettings{}, + }, + "bar": { + JobSettings: &jobs.JobSettings{}, + }, }, }, }, @@ -45,10 +50,14 @@ func TestLookup_MultipleFound(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - 
"foo": {}, + "foo": { + JobSettings: &jobs.JobSettings{}, + }, }, Pipelines: map[string]*resources.Pipeline{ - "foo": {}, + "foo": { + PipelineSpec: &pipelines.PipelineSpec{}, + }, }, }, }, @@ -92,10 +101,14 @@ func TestLookup_NominalWithFilters(t *testing.T) { Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "foo": {}, + "foo": { + JobSettings: &jobs.JobSettings{}, + }, }, Pipelines: map[string]*resources.Pipeline{ - "bar": {}, + "bar": { + PipelineSpec: &pipelines.PipelineSpec{}, + }, }, }, }, From 7b9726dd649efa169cd86c3c0d1f0395e4655a38 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Thu, 28 Nov 2024 15:33:51 +0000 Subject: [PATCH 42/42] Bump github.com/databricks/databricks-sdk-go from 0.51.0 to 0.52.0 (#1931) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.51.0 to 0.52.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.52.0

Internal Changes

  • Update Jobs GetRun API to support paginated responses for jobs and ForEach tasks (#1089).

API Changes:

OpenAPI SHA: f2385add116e3716c8a90a0b68e204deb40f996c, Date: 2024-11-15

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

[Release] Release v0.52.0

Internal Changes

  • Update Jobs GetRun API to support paginated responses for jobs and ForEach tasks (#1089).

API Changes:

OpenAPI SHA: f2385add116e3716c8a90a0b68e204deb40f996c, Date: 2024-11-15

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
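Ignore conditions like the one above are typically created with `@dependabot ignore` comments on earlier PRs; a rough equivalent could also be declared statically in `.github/dependabot.yml` (a hypothetical sketch, not part of this PR):

```yaml
version: 2
updates:
  - package-ecosystem: "gomod"
    directory: "/"
    schedule:
      interval: "weekly"
    ignore:
      # Do not propose SDK updates in this version range.
      - dependency-name: "github.com/databricks/databricks-sdk-go"
        versions: [">= 0.28.a, < 0.29"]
```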
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.51.0&new-version=0.52.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) ---
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- bundle/schema/jsonschema.json | 8 +- cmd/account/budgets/budgets.go | 2 +- .../custom-app-integration.go | 5 +- .../workspace-assignment.go | 2 +- cmd/workspace/apps/apps.go | 8 +- cmd/workspace/apps/overrides.go | 59 ------------ cmd/workspace/credentials/credentials.go | 95 ++++++++++++++----- cmd/workspace/lakeview/lakeview.go | 10 +- .../notification-destinations.go | 5 +- cmd/workspace/repos/repos.go | 6 +- .../token-management/token-management.go | 6 +- go.mod | 2 +- go.sum | 4 +- 14 files changed, 105 insertions(+), 109 deletions(-) delete mode 100644 cmd/workspace/apps/overrides.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 5f4b50860..a2ba58aa5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -d25296d2f4aa7bd6195c816fdf82e0f960f775da \ No newline at end of file +f2385add116e3716c8a90a0b68e204deb40f996c \ No newline at end of file diff --git a/bundle/schema/jsonschema.json b/bundle/schema/jsonschema.json index 703daafeb..cf003f423 100644 --- a/bundle/schema/jsonschema.json +++ b/bundle/schema/jsonschema.json @@ -817,6 +817,9 @@ "metastore": { "$ref": "#/$defs/string" }, + "notification_destination": { + "$ref": "#/$defs/string" + }, "pipeline": { "$ref": "#/$defs/string" }, @@ -1079,6 +1082,9 @@ "pipelines_development": { "$ref": "#/$defs/bool" }, + "source_linked_deployment": { + "$ref": "#/$defs/bool" + }, "tags": { "$ref": "#/$defs/map/string" }, @@ -2824,7 +2830,7 @@ "anyOf": [ { "type": "object", - "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nExactly one of `user_name`, `service_principal_name`, `group_name` should be specified. If not, an error is thrown.", + "description": "Write-only setting. Specifies the user, service principal or group that the job/pipeline runs as. If not specified, the job/pipeline runs as the user who created the job/pipeline.\n\nEither `user_name` or `service_principal_name` should be specified. If not, an error is thrown.", "properties": { "service_principal_name": { "description": "Application ID of an active service principal. Setting this field requires the `servicePrincipal/user` role.", diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 87ed41d57..748dc6994 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -194,7 +194,7 @@ func newGet() *cobra.Command { configuration are specified by ID. Arguments: - BUDGET_ID: The Databricks budget configuration ID.` + BUDGET_ID: The budget configuration ID` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index 9d16a44d4..1eec1018e 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -195,7 +195,10 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Custom App Integration.` cmd.Long = `Get OAuth Custom App Integration. - Gets the Custom OAuth App Integration for the given integration id.` + Gets the Custom OAuth App Integration for the given integration id. 
+ + Arguments: + INTEGRATION_ID: The OAuth app integration ID.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index e09095d37..c5385c92a 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -257,7 +257,7 @@ func newUpdate() *cobra.Command { workspace for the specified principal. Arguments: - WORKSPACE_ID: The workspace ID for the account. + WORKSPACE_ID: The workspace ID. PRINCIPAL_ID: The ID of the user, service principal, or group.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 514da697b..a103ba7a8 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -942,14 +942,13 @@ func newUpdate() *cobra.Command { // TODO: complex arg: pending_deployment // TODO: array: resources - cmd.Use = "update NAME NAME" + cmd.Use = "update NAME" cmd.Short = `Update an app.` cmd.Long = `Update an app. Updates the app with the supplied name. Arguments: - NAME: The name of the app. NAME: The name of the app. The name must contain only lowercase alphanumeric characters and hyphens. It must be unique within the workspace.` @@ -963,7 +962,7 @@ func newUpdate() *cobra.Command { } return nil } - check := root.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -985,9 +984,6 @@ func newUpdate() *cobra.Command { } } updateReq.Name = args[0] - if !cmd.Flags().Changed("json") { - updateReq.App.Name = args[1] - } response, err := w.Apps.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go deleted file mode 100644 index debd9f5a6..000000000 --- a/cmd/workspace/apps/overrides.go +++ /dev/null @@ -1,59 +0,0 @@ -package apps - -import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/apps" - "github.com/spf13/cobra" -) - -// We override apps.Update command beccause currently genkit does not support -// a way to identify that path field (such as name) matches the field in the request body. -// As a result, genkit generates a command with 2 required same fields, update NAME NAME. -// This override should be removed when genkit supports this. -func updateOverride(cmd *cobra.Command, req *apps.UpdateAppRequest) { - cmd.Use = "update NAME" - cmd.Long = `Update an app. - - Updates the app with the supplied name. - - Arguments: - NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens. 
diff --git a/cmd/workspace/credentials/credentials.go b/cmd/workspace/credentials/credentials.go
index 869df0628..44ee0cf31 100755
--- a/cmd/workspace/credentials/credentials.go
+++ b/cmd/workspace/credentials/credentials.go
@@ -3,6 +3,8 @@
 package credentials
 
 import (
+	"fmt"
+
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/flags"
@@ -30,9 +32,6 @@ func New() *cobra.Command {
 		Annotations: map[string]string{
 			"package": "catalog",
 		},
-
-		// This service is being previewed; hide from help output.
-		Hidden: true,
 	}
 
 	// Add methods
@@ -72,21 +71,39 @@ func newCreateCredential() *cobra.Command {
 	// TODO: complex arg: aws_iam_role
 	// TODO: complex arg: azure_managed_identity
+	// TODO: complex arg: azure_service_principal
 	cmd.Flags().StringVar(&createCredentialReq.Comment, "comment", createCredentialReq.Comment, `Comment associated with the credential.`)
-	cmd.Flags().StringVar(&createCredentialReq.Name, "name", createCredentialReq.Name, `The credential name.`)
-	cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE]`)
+	// TODO: complex arg: gcp_service_account_key
+	cmd.Flags().Var(&createCredentialReq.Purpose, "purpose", `Indicates the purpose of the credential. Supported values: [SERVICE, STORAGE]`)
+	cmd.Flags().BoolVar(&createCredentialReq.ReadOnly, "read-only", createCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`)
 	cmd.Flags().BoolVar(&createCredentialReq.SkipValidation, "skip-validation", createCredentialReq.SkipValidation, `Optional.`)
 
-	cmd.Use = "create-credential"
+	cmd.Use = "create-credential NAME"
 	cmd.Short = `Create a credential.`
 	cmd.Long = `Create a credential.
 
-  Creates a new credential.`
+  Creates a new credential. The type of credential to be created is determined
+  by the **purpose** field, which should be either **SERVICE** or **STORAGE**.
+
+  The caller must be a metastore admin or have the metastore privilege
+  **CREATE_STORAGE_CREDENTIAL** for storage credentials, or
+  **CREATE_SERVICE_CREDENTIAL** for service credentials.
+
+  Arguments:
+    NAME: The credential name. The name must be unique among storage and service
+    credentials within the metastore.`
 
 	cmd.Annotations = make(map[string]string)
 
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := root.ExactArgs(0)
+		if cmd.Flags().Changed("json") {
+			err := root.ExactArgs(0)(cmd, args)
+			if err != nil {
+				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input")
+			}
+			return nil
+		}
+		check := root.ExactArgs(1)
 		return check(cmd, args)
 	}
@@ -107,6 +124,9 @@ func newCreateCredential() *cobra.Command {
 				}
 			}
 		}
+		if !cmd.Flags().Changed("json") {
+			createCredentialReq.Name = args[0]
+		}
 
 		response, err := w.Credentials.CreateCredential(ctx, createCredentialReq)
 		if err != nil {
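The regenerated create-credential command uses the CLI's standard pattern for endpoints that accept either a --json payload or a positional argument: with --json, zero positional arguments are allowed and the field must come from the payload; otherwise exactly one argument is required and bound by hand. A self-contained approximation (the flag plumbing is simplified; the real command uses flags.JsonFlag and root.ExactArgs):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/spf13/cobra"
    )

    func main() {
        var jsonPayload string
        req := struct {
            Name string `json:"name"`
        }{}

        cmd := &cobra.Command{Use: "create-credential NAME"}
        cmd.Flags().StringVar(&jsonPayload, "json", "", "inline JSON request body")

        cmd.Args = func(cmd *cobra.Command, args []string) error {
            if cmd.Flags().Changed("json") {
                // With --json, the name comes from the payload, not argv.
                if err := cobra.ExactArgs(0)(cmd, args); err != nil {
                    return fmt.Errorf("when --json is specified, provide 'name' in the JSON input")
                }
                return nil
            }
            return cobra.ExactArgs(1)(cmd, args)
        }

        cmd.RunE = func(cmd *cobra.Command, args []string) error {
            if cmd.Flags().Changed("json") {
                if err := json.Unmarshal([]byte(jsonPayload), &req); err != nil {
                    return err
                }
            } else {
                req.Name = args[0]
            }
            fmt.Printf("would create credential %q\n", req.Name)
            return nil
        }

        _ = cmd.Execute()
    }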
@@ -143,14 +163,14 @@ func newDeleteCredential() *cobra.Command {
 
 	// TODO: short flags
 
-	cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services.`)
+	cmd.Flags().BoolVar(&deleteCredentialReq.Force, "force", deleteCredentialReq.Force, `Force deletion even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`)
 
 	cmd.Use = "delete-credential NAME_ARG"
 	cmd.Short = `Delete a credential.`
 	cmd.Long = `Delete a credential.
 
-  Deletes a credential from the metastore. The caller must be an owner of the
-  credential.
+  Deletes a service or storage credential from the metastore. The caller must be
+  an owner of the credential.
 
   Arguments:
     NAME_ARG: Name of the credential.`
@@ -207,20 +227,29 @@ func newGenerateTemporaryServiceCredential() *cobra.Command {
 	cmd.Flags().Var(&generateTemporaryServiceCredentialJson, "json", `either inline JSON string or @path/to/file.json with request body`)
 
 	// TODO: complex arg: azure_options
-	cmd.Flags().StringVar(&generateTemporaryServiceCredentialReq.CredentialName, "credential-name", generateTemporaryServiceCredentialReq.CredentialName, `The name of the service credential used to generate a temporary credential.`)
 
-	cmd.Use = "generate-temporary-service-credential"
+	cmd.Use = "generate-temporary-service-credential CREDENTIAL_NAME"
 	cmd.Short = `Generate a temporary service credential.`
 	cmd.Long = `Generate a temporary service credential.
 
   Returns a set of temporary credentials generated using the specified service
   credential. The caller must be a metastore admin or have the metastore
-  privilege **ACCESS** on the service credential.`
+  privilege **ACCESS** on the service credential.
+
+  Arguments:
+    CREDENTIAL_NAME: The name of the service credential used to generate a temporary credential`
 
 	cmd.Annotations = make(map[string]string)
 
 	cmd.Args = func(cmd *cobra.Command, args []string) error {
-		check := root.ExactArgs(0)
+		if cmd.Flags().Changed("json") {
+			err := root.ExactArgs(0)(cmd, args)
+			if err != nil {
+				return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'credential_name' in your JSON input")
+			}
+			return nil
+		}
+		check := root.ExactArgs(1)
 		return check(cmd, args)
 	}
@@ -241,6 +270,9 @@ func newGenerateTemporaryServiceCredential() *cobra.Command {
 				}
 			}
 		}
+		if !cmd.Flags().Changed("json") {
+			generateTemporaryServiceCredentialReq.CredentialName = args[0]
+		}
 
 		response, err := w.Credentials.GenerateTemporaryServiceCredential(ctx, generateTemporaryServiceCredentialReq)
 		if err != nil {
@@ -281,8 +313,9 @@ func newGetCredential() *cobra.Command {
 	cmd.Short = `Get a credential.`
 	cmd.Long = `Get a credential.
 
-  Gets a credential from the metastore. The caller must be a metastore admin,
-  the owner of the credential, or have any permission on the credential.
+  Gets a service or storage credential from the metastore. The caller must be a
+  metastore admin, the owner of the credential, or have any permission on the
+  credential.
 
   Arguments:
     NAME_ARG: Name of the credential.`
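The same request-binding pattern repeats for generate-temporary-service-credential. Going through the Go SDK directly looks roughly like this; w.Credentials.GenerateTemporaryServiceCredential and the CredentialName field appear verbatim in the hunk above, while the request type name and its location in the catalog package are inferred, not confirmed:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/catalog"
    )

    func main() {
        ctx := context.Background()
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            log.Fatal(err)
        }

        // Requires the metastore privilege **ACCESS** on the service credential.
        cred, err := w.Credentials.GenerateTemporaryServiceCredential(ctx,
            catalog.GenerateTemporaryServiceCredentialRequest{ // type name assumed
                CredentialName: "my-service-credential", // hypothetical name
            })
        if err != nil {
            log.Fatal(err)
        }
        fmt.Printf("temporary credential: %+v\n", cred)
    }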
@@ -338,7 +371,7 @@ func newListCredentials() *cobra.Command {
 	cmd.Flags().IntVar(&listCredentialsReq.MaxResults, "max-results", listCredentialsReq.MaxResults, `Maximum number of credentials to return.`)
 	cmd.Flags().StringVar(&listCredentialsReq.PageToken, "page-token", listCredentialsReq.PageToken, `Opaque token to retrieve the next page of results.`)
-	cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE]`)
+	cmd.Flags().Var(&listCredentialsReq.Purpose, "purpose", `Return only credentials for the specified purpose. Supported values: [SERVICE, STORAGE]`)
 
 	cmd.Use = "list-credentials"
 	cmd.Short = `List credentials.`
@@ -399,18 +432,20 @@ func newUpdateCredential() *cobra.Command {
 	// TODO: complex arg: aws_iam_role
 	// TODO: complex arg: azure_managed_identity
+	// TODO: complex arg: azure_service_principal
 	cmd.Flags().StringVar(&updateCredentialReq.Comment, "comment", updateCredentialReq.Comment, `Comment associated with the credential.`)
-	cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force update even if there are dependent services.`)
+	cmd.Flags().BoolVar(&updateCredentialReq.Force, "force", updateCredentialReq.Force, `Force an update even if there are dependent services (when purpose is **SERVICE**) or dependent external locations and external tables (when purpose is **STORAGE**).`)
 	cmd.Flags().Var(&updateCredentialReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
 	cmd.Flags().StringVar(&updateCredentialReq.NewName, "new-name", updateCredentialReq.NewName, `New name of credential.`)
 	cmd.Flags().StringVar(&updateCredentialReq.Owner, "owner", updateCredentialReq.Owner, `Username of current owner of credential.`)
+	cmd.Flags().BoolVar(&updateCredentialReq.ReadOnly, "read-only", updateCredentialReq.ReadOnly, `Whether the credential is usable only for read operations.`)
 	cmd.Flags().BoolVar(&updateCredentialReq.SkipValidation, "skip-validation", updateCredentialReq.SkipValidation, `Supply true to this argument to skip validation of the updated credential.`)
 
 	cmd.Use = "update-credential NAME_ARG"
 	cmd.Short = `Update a credential.`
 	cmd.Long = `Update a credential.
 
-  Updates a credential on the metastore.
+  Updates a service or storage credential on the metastore.
 
   The caller must be the owner of the credential or a metastore admin or have
   the MANAGE permission. If the caller is a metastore admin, only the
@@ -485,7 +520,10 @@ func newValidateCredential() *cobra.Command {
 	// TODO: complex arg: aws_iam_role
 	// TODO: complex arg: azure_managed_identity
 	cmd.Flags().StringVar(&validateCredentialReq.CredentialName, "credential-name", validateCredentialReq.CredentialName, `Required.`)
-	cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE]`)
+	cmd.Flags().StringVar(&validateCredentialReq.ExternalLocationName, "external-location-name", validateCredentialReq.ExternalLocationName, `The name of an existing external location to validate.`)
+	cmd.Flags().Var(&validateCredentialReq.Purpose, "purpose", `The purpose of the credential. Supported values: [SERVICE, STORAGE]`)
+	cmd.Flags().BoolVar(&validateCredentialReq.ReadOnly, "read-only", validateCredentialReq.ReadOnly, `Whether the credential is only usable for read operations.`)
+	cmd.Flags().StringVar(&validateCredentialReq.Url, "url", validateCredentialReq.Url, `The external location url to validate.`)
 
 	cmd.Use = "validate-credential"
 	cmd.Short = `Validate a credential.`
@@ -493,10 +531,19 @@ func newValidateCredential() *cobra.Command {
 
   Validates a credential.
 
-  Either the __credential_name__ or the cloud-specific credential must be
-  provided.
+  For service credentials (purpose is **SERVICE**), either the
+  __credential_name__ or the cloud-specific credential must be provided.
 
-  The caller must be a metastore admin or the credential owner.`
+  For storage credentials (purpose is **STORAGE**), at least one of
+  __external_location_name__ and __url__ needs to be provided. If only one of
+  them is provided, it will be used for validation; if both are provided, the
+  __url__ will be used for validation and __external_location_name__ will be
+  ignored when checking for overlapping URLs. Either the __credential_name__ or
+  the cloud-specific credential must be provided.
+
+  The caller must be a metastore admin or the credential owner or have the
+  required permission on the metastore and the credential (e.g.,
+  **CREATE_EXTERNAL_LOCATION** when purpose is **STORAGE**).`
 
 	cmd.Annotations = make(map[string]string)
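The validation rules in the docstring above translate to an SDK call roughly as follows. The field names mirror the flag bindings in this hunk (CredentialName, ExternalLocationName, Purpose, Url), but the request type name and the string-backed purpose enum are assumptions:

    package main

    import (
        "context"
        "log"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/catalog"
    )

    func main() {
        ctx := context.Background()
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            log.Fatal(err)
        }

        // Storage purpose: at least one of external location name or URL is
        // needed; if both are set, the URL wins and the location is only
        // checked for overlapping URLs.
        _, err = w.Credentials.ValidateCredential(ctx, catalog.ValidateCredentialRequest{ // type name assumed
            CredentialName: "my-storage-credential",              // hypothetical
            Purpose:        catalog.CredentialPurpose("STORAGE"), // value from the flag help above
            Url:            "s3://my-bucket/prefix",              // hypothetical
        })
        if err != nil {
            log.Fatal(err)
        }
    }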
diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go
index 239c72b6e..35c3bdf4e 100755
--- a/cmd/workspace/lakeview/lakeview.go
+++ b/cmd/workspace/lakeview/lakeview.go
@@ -503,7 +503,7 @@ func newGetPublished() *cobra.Command {
   Get the current published dashboard.
 
   Arguments:
-    DASHBOARD_ID: UUID identifying the dashboard to be published.`
+    DASHBOARD_ID: UUID identifying the published dashboard.`
 
 	cmd.Annotations = make(map[string]string)
@@ -737,7 +737,7 @@ func newListSchedules() *cobra.Command {
 	cmd.Long = `List dashboard schedules.
 
   Arguments:
-    DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.`
+    DASHBOARD_ID: UUID identifying the dashboard to which the schedules belong.`
 
 	// This command is being previewed; hide from help output.
 	cmd.Hidden = true
@@ -795,8 +795,8 @@ func newListSubscriptions() *cobra.Command {
 	cmd.Long = `List schedule subscriptions.
 
   Arguments:
-    DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs.
-    SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.`
+    DASHBOARD_ID: UUID identifying the dashboard to which the subscriptions belong.
+    SCHEDULE_ID: UUID identifying the schedule to which the subscriptions belong.`
 
 	// This command is being previewed; hide from help output.
 	cmd.Hidden = true
@@ -1072,7 +1072,7 @@ func newUnpublish() *cobra.Command {
   Unpublish the dashboard.
 
   Arguments:
-    DASHBOARD_ID: UUID identifying the dashboard to be published.`
+    DASHBOARD_ID: UUID identifying the published dashboard.`
 
 	cmd.Annotations = make(map[string]string)
diff --git a/cmd/workspace/notification-destinations/notification-destinations.go b/cmd/workspace/notification-destinations/notification-destinations.go
index 470765879..b06652c71 100755
--- a/cmd/workspace/notification-destinations/notification-destinations.go
+++ b/cmd/workspace/notification-destinations/notification-destinations.go
@@ -304,7 +304,10 @@ func newUpdate() *cobra.Command {
 	cmd.Long = `Update a notification destination.
 
   Updates a notification destination. Requires workspace admin permissions. At
-  least one field is required in the request body.`
+  least one field is required in the request body.
+
+  Arguments:
+    ID: UUID identifying the notification destination.`
 
 	cmd.Annotations = make(map[string]string)
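The notification destination update now takes the destination ID positionally. Through the SDK the same call is roughly the following; the service name is taken from this command's package, but the request type and field names are assumed rather than confirmed:

    package main

    import (
        "context"
        "log"

        "github.com/databricks/databricks-sdk-go"
        "github.com/databricks/databricks-sdk-go/service/settings"
    )

    func main() {
        ctx := context.Background()
        w, err := databricks.NewWorkspaceClient()
        if err != nil {
            log.Fatal(err)
        }

        // At least one field besides the ID must be set in the request body.
        _, err = w.NotificationDestinations.Update(ctx, settings.UpdateNotificationDestinationRequest{ // type name assumed
            Id:          "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx", // hypothetical UUID
            DisplayName: "on-call-alerts",                       // hypothetical
        })
        if err != nil {
            log.Fatal(err)
        }
    }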
diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go
index 7dcb13538..799472650 100755
--- a/cmd/workspace/repos/repos.go
+++ b/cmd/workspace/repos/repos.go
@@ -171,7 +171,7 @@ func newDelete() *cobra.Command {
   Deletes the specified repo.
 
   Arguments:
-    REPO_ID: ID of the Git folder (repo) object in the workspace.`
+    REPO_ID: The ID for the corresponding repo to delete.`
 
 	cmd.Annotations = make(map[string]string)
@@ -188,14 +188,14 @@ func newDelete() *cobra.Command {
 		if err != nil {
 			return fmt.Errorf("failed to load names for Repos drop-down. Please manually specify required arguments. Original error: %w", err)
 		}
-		id, err := cmdio.Select(ctx, names, "ID of the Git folder (repo) object in the workspace")
+		id, err := cmdio.Select(ctx, names, "The ID for the corresponding repo to delete")
 		if err != nil {
 			return err
 		}
 		args = append(args, id)
 	}
 	if len(args) != 1 {
-		return fmt.Errorf("expected to have id of the git folder (repo) object in the workspace")
+		return fmt.Errorf("expected to have the id for the corresponding repo to delete")
 	}
 	_, err = fmt.Sscan(args[0], &deleteReq.RepoId)
 	if err != nil {
diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go
index c8d57fd6d..fcc70c126 100755
--- a/cmd/workspace/token-management/token-management.go
+++ b/cmd/workspace/token-management/token-management.go
@@ -169,7 +169,7 @@ func newDelete() *cobra.Command {
   Deletes a token, specified by its ID.
 
   Arguments:
-    TOKEN_ID: The ID of the token to get.`
+    TOKEN_ID: The ID of the token to revoke.`
 
 	cmd.Annotations = make(map[string]string)
@@ -186,14 +186,14 @@ func newDelete() *cobra.Command {
 		if err != nil {
 			return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err)
 		}
-		id, err := cmdio.Select(ctx, names, "The ID of the token to get")
+		id, err := cmdio.Select(ctx, names, "The ID of the token to revoke")
 		if err != nil {
 			return err
 		}
 		args = append(args, id)
 	}
 	if len(args) != 1 {
-		return fmt.Errorf("expected to have the id of the token to revoke")
 	}
 	deleteReq.TokenId = args[0]
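One detail worth noting in the repos hunk: REPO_ID is numeric, so the command parses it with fmt.Sscan into the request's integer field rather than assigning the raw string (token IDs, by contrast, are assigned directly). A trivial, self-contained sketch of that parse:

    package main

    import "fmt"

    func main() {
        // Scanning into an int64 fails cleanly on non-numeric input, which is
        // how the repos delete command rejects a malformed REPO_ID.
        var repoId int64
        if _, err := fmt.Sscan("123456", &repoId); err != nil {
            fmt.Println("invalid repo id:", err)
            return
        }
        fmt.Println("parsed repo id:", repoId) // parsed repo id: 123456
    }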
diff --git a/go.mod b/go.mod
index 353313131..7141ed768 100644
--- a/go.mod
+++ b/go.mod
@@ -7,7 +7,7 @@ toolchain go1.23.2
 require (
 	github.com/Masterminds/semver/v3 v3.3.1 // MIT
 	github.com/briandowns/spinner v1.23.1 // Apache 2.0
-	github.com/databricks/databricks-sdk-go v0.51.0 // Apache 2.0
+	github.com/databricks/databricks-sdk-go v0.52.0 // Apache 2.0
 	github.com/fatih/color v1.18.0 // MIT
 	github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
 	github.com/google/uuid v1.6.0 // BSD-3-Clause
diff --git a/go.sum b/go.sum
index 312a4d934..5d2c53a37 100644
--- a/go.sum
+++ b/go.sum
@@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
 github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
 github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
 github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
-github.com/databricks/databricks-sdk-go v0.51.0 h1:tcvB9TID3oUl0O8npccB5c+33tarBiYMBFbq4U4AB6M=
-github.com/databricks/databricks-sdk-go v0.51.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
+github.com/databricks/databricks-sdk-go v0.52.0 h1:WKcj0F+pdx0gjI5xMicjYC4O43S2q5nyTpaGGMFmgHw=
+github.com/databricks/databricks-sdk-go v0.52.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
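With the module bumped to databricks-sdk-go v0.52.0, a minimal smoke test that the dependency resolves and a client authenticates might look like this; CurrentUser.Me is a stable, read-only call in the Go SDK, and configuration is read from the environment:

    package main

    import (
        "context"
        "fmt"
        "log"

        "github.com/databricks/databricks-sdk-go"
    )

    func main() {
        ctx := context.Background()
        w, err := databricks.NewWorkspaceClient() // reads auth from env/config profile
        if err != nil {
            log.Fatal(err)
        }
        me, err := w.CurrentUser.Me(ctx)
        if err != nil {
            log.Fatal(err)
        }
        fmt.Println("authenticated as", me.UserName)
    }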