From a38e16c6547b74edd2a0c8260c35c9869cc36f57 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 12 Aug 2024 14:49:53 +0200 Subject: [PATCH 01/36] Bump golang.org/x/term from 0.22.0 to 0.23.0 (#1669) Bumps [golang.org/x/term](https://github.com/golang/term) from 0.22.0 to 0.23.0.

[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=golang.org/x/term&package-manager=go_modules&previous-version=0.22.0&new-version=0.23.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. ---
Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- go.mod | 4 ++-- go.sum | 8 ++++---- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/go.mod b/go.mod index aa05ffbc5..3f5af0815 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/mod v0.20.0 golang.org/x/oauth2 v0.22.0 golang.org/x/sync v0.8.0 - golang.org/x/term v0.22.0 + golang.org/x/term v0.23.0 golang.org/x/text v0.17.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 @@ -62,7 +62,7 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.23.0 // indirect golang.org/x/net v0.25.0 // indirect - golang.org/x/sys v0.22.0 // indirect + golang.org/x/sys v0.23.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect diff --git a/go.sum b/go.sum index 3f291b4ce..f33a9562a 100644 --- a/go.sum +++ b/go.sum @@ -212,10 +212,10 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= -golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= -golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= +golang.org/x/sys v0.23.0 h1:YfKFowiIMvtgl1UERQoTPPToxltDeZfbj4H7dVUCwmM= +golang.org/x/sys v0.23.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.23.0 h1:F6D4vR+EHoL9/sWAWgAR1H2DcHr4PareCbAaCo1RpuU= +golang.org/x/term v0.23.0/go.mod h1:DgV24QBUrK6jhZXl+20l6UWznPlwAHm1Q1mGHtydmSk= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.17.0 h1:XtiM5bkSOt+ewxlOE/aE/AKEHibwj/6gvWMl9Rsh0Qc= From ad8e61c73925b9f1b24cbc63ac1bc5b51348dbe0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 12 Aug 2024 16:20:04 +0200 Subject: [PATCH 02/36] Fix ability to import the CLI repository as module (#1671) ## Changes While investigating #1629, I found that Go doesn't allow characters outside the set documented at https://pkg.go.dev/golang.org/x/mod/module#CheckFilePath. To fix this, I changed the relevant test case to create the fixtures it needs instead of loading it from the `testdata` directory (in `renderer_test.go`). Some test cases in `config_test.go` depended on templated paths without needing to do so. In the process of fixing this, I refactored these tests slightly to reduce dependencies between them. This change also adds a test case to ensure that all files in the repository are allowed to be part of a module (per the earlier `CheckFilePath` function). Fixes #1629. ## Tests I manually confirmed I could import the repository as a Go module. 
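As an illustration of the constraint this change works around, the sketch below runs `module.CheckFilePath` (from `golang.org/x/mod/module`, the same check the new `TestFilePath` test applies to every file in the repository) against a plain repository path and against a templated fixture path of the kind that previously lived under `testdata`; the exact paths are only examples.

```go
package main

import (
	"fmt"

	"golang.org/x/mod/module"
)

func main() {
	// A regular repository path is a valid file path for a Go module.
	fmt.Println(module.CheckFilePath("libs/template/renderer_test.go")) // <nil>

	// Templated fixture paths contain backticks, which fall outside the
	// character set allowed in module file paths, so keeping such files in
	// the repository breaks importing it as a Go module.
	err := module.CheckFilePath("libs/template/testdata/template-in-path/template/{{template `dir_name`}}")
	fmt.Println(err != nil) // true
}
```

This is why the fixtures are now created at test time in a temporary directory instead of being checked in.
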
--- internal/testutil/copy.go | 48 ++++++ libs/template/config_test.go | 153 +++++++++++------- libs/template/renderer_test.go | 23 ++- .../schema.json | 24 +++ .../schema.json | 20 +++ .../schema.json | 20 +++ .../config-assign-from-file/schema.json | 20 +++ .../schema.json | 24 +++ .../template/library/my_funcs.tmpl | 3 + .../template/template/.gitkeep} | 0 .../config-test-schema/test-schema.json | 6 +- .../library/.gitkeep} | 0 .../template/testdata/empty/template/.gitkeep | 0 .../template-in-path/template/.gitkeep | 0 .../templated-defaults/library/my_funcs.tmpl | 7 - main_test.go | 25 +++ 16 files changed, 297 insertions(+), 76 deletions(-) create mode 100644 internal/testutil/copy.go create mode 100644 libs/template/testdata/config-assign-from-default-value/schema.json create mode 100644 libs/template/testdata/config-assign-from-file-invalid-int/schema.json create mode 100644 libs/template/testdata/config-assign-from-file-unknown-property/schema.json create mode 100644 libs/template/testdata/config-assign-from-file/schema.json create mode 100644 libs/template/testdata/config-assign-from-templated-default-value/schema.json create mode 100644 libs/template/testdata/config-assign-from-templated-default-value/template/library/my_funcs.tmpl rename libs/template/testdata/{template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} => config-assign-from-templated-default-value/template/template/.gitkeep} (100%) rename libs/template/testdata/{templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} => empty/library/.gitkeep} (100%) create mode 100644 libs/template/testdata/empty/template/.gitkeep create mode 100644 libs/template/testdata/template-in-path/template/.gitkeep delete mode 100644 libs/template/testdata/templated-defaults/library/my_funcs.tmpl diff --git a/internal/testutil/copy.go b/internal/testutil/copy.go new file mode 100644 index 000000000..21faece00 --- /dev/null +++ b/internal/testutil/copy.go @@ -0,0 +1,48 @@ +package testutil + +import ( + "io" + "io/fs" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +// CopyDirectory copies the contents of a directory to another directory. +// The destination directory is created if it does not exist. 
+func CopyDirectory(t *testing.T, src, dst string) { + err := filepath.WalkDir(src, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + + rel, err := filepath.Rel(src, path) + require.NoError(t, err) + + if d.IsDir() { + return os.MkdirAll(filepath.Join(dst, rel), 0755) + } + + // Copy the file to the temporary directory + in, err := os.Open(path) + if err != nil { + return err + } + + defer in.Close() + + out, err := os.Create(filepath.Join(dst, rel)) + if err != nil { + return err + } + + defer out.Close() + + _, err = io.Copy(out, in) + return err + }) + + require.NoError(t, err) +} diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 1af2e5f5a..73b47f289 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -3,59 +3,70 @@ package template import ( "context" "fmt" + "path/filepath" "testing" "text/template" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/jsonschema" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testConfig(t *testing.T) *config { - c, err := newConfig(context.Background(), "./testdata/config-test-schema/test-schema.json") - require.NoError(t, err) - return c -} - func TestTemplateConfigAssignValuesFromFile(t *testing.T) { - c := testConfig(t) + testDir := "./testdata/config-assign-from-file" - err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") - assert.NoError(t, err) + ctx := context.Background() + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) - assert.Equal(t, int64(1), c.values["int_val"]) - assert.Equal(t, float64(2), c.values["float_val"]) - assert.Equal(t, true, c.values["bool_val"]) - assert.Equal(t, "hello", c.values["string_val"]) -} - -func TestTemplateConfigAssignValuesFromFileForInvalidIntegerValue(t *testing.T) { - c := testConfig(t) - - err := c.assignValuesFromFile("./testdata/config-assign-from-file-invalid-int/config.json") - assert.EqualError(t, err, "failed to load config from file ./testdata/config-assign-from-file-invalid-int/config.json: failed to parse property int_val: cannot convert \"abc\" to an integer") + err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) + if assert.NoError(t, err) { + assert.Equal(t, int64(1), c.values["int_val"]) + assert.Equal(t, float64(2), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + assert.Equal(t, "hello", c.values["string_val"]) + } } func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *testing.T) { - c := testConfig(t) + testDir := "./testdata/config-assign-from-file" + + ctx := context.Background() + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) + c.values = map[string]any{ "string_val": "this-is-not-overwritten", } - err := c.assignValuesFromFile("./testdata/config-assign-from-file/config.json") - assert.NoError(t, err) + err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) + if assert.NoError(t, err) { + assert.Equal(t, int64(1), c.values["int_val"]) + assert.Equal(t, float64(2), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) + } +} - assert.Equal(t, int64(1), c.values["int_val"]) - assert.Equal(t, float64(2), c.values["float_val"]) - assert.Equal(t, true, c.values["bool_val"]) - assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) +func 
TestTemplateConfigAssignValuesFromFileForInvalidIntegerValue(t *testing.T) { + testDir := "./testdata/config-assign-from-file-invalid-int" + + ctx := context.Background() + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) + + err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) + assert.EqualError(t, err, fmt.Sprintf("failed to load config from file %s: failed to parse property int_val: cannot convert \"abc\" to an integer", filepath.Join(testDir, "config.json"))) } func TestTemplateConfigAssignValuesFromFileFiltersPropertiesNotInTheSchema(t *testing.T) { - c := testConfig(t) + testDir := "./testdata/config-assign-from-file-unknown-property" - err := c.assignValuesFromFile("./testdata/config-assign-from-file-unknown-property/config.json") + ctx := context.Background() + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) + + err = c.assignValuesFromFile(filepath.Join(testDir, "config.json")) assert.NoError(t, err) // assert only the known property is loaded @@ -63,37 +74,66 @@ func TestTemplateConfigAssignValuesFromFileFiltersPropertiesNotInTheSchema(t *te assert.Equal(t, "i am a known property", c.values["string_val"]) } -func TestTemplateConfigAssignDefaultValues(t *testing.T) { - c := testConfig(t) +func TestTemplateConfigAssignValuesFromDefaultValues(t *testing.T) { + testDir := "./testdata/config-assign-from-default-value" ctx := context.Background() - ctx = root.SetWorkspaceClient(ctx, nil) - helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/template-in-path/template", "./testdata/template-in-path/library", t.TempDir()) + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) + + r, err := newRenderer(ctx, nil, nil, "./testdata/empty/template", "./testdata/empty/library", t.TempDir()) require.NoError(t, err) err = c.assignDefaultValues(r) - assert.NoError(t, err) + if assert.NoError(t, err) { + assert.Equal(t, int64(123), c.values["int_val"]) + assert.Equal(t, float64(123), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + assert.Equal(t, "hello", c.values["string_val"]) + } +} - assert.Len(t, c.values, 2) - assert.Equal(t, "my_file", c.values["string_val"]) - assert.Equal(t, int64(123), c.values["int_val"]) +func TestTemplateConfigAssignValuesFromTemplatedDefaultValues(t *testing.T) { + testDir := "./testdata/config-assign-from-templated-default-value" + + ctx := context.Background() + c, err := newConfig(ctx, filepath.Join(testDir, "schema.json")) + require.NoError(t, err) + + r, err := newRenderer(ctx, nil, nil, filepath.Join(testDir, "template/template"), filepath.Join(testDir, "template/library"), t.TempDir()) + require.NoError(t, err) + + // Note: only the string value is templated. + // The JSON schema package doesn't allow using a string default for integer types. 
+ err = c.assignDefaultValues(r) + if assert.NoError(t, err) { + assert.Equal(t, int64(123), c.values["int_val"]) + assert.Equal(t, float64(123), c.values["float_val"]) + assert.Equal(t, true, c.values["bool_val"]) + assert.Equal(t, "world", c.values["string_val"]) + } } func TestTemplateConfigValidateValuesDefined(t *testing.T) { - c := testConfig(t) + ctx := context.Background() + c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + require.NoError(t, err) + c.values = map[string]any{ "int_val": 1, "float_val": 1.0, "bool_val": false, } - err := c.validate() + err = c.validate() assert.EqualError(t, err, "validation for template input parameters failed. no value provided for required property string_val") } func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { - c := testConfig(t) + ctx := context.Background() + c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + require.NoError(t, err) + c.values = map[string]any{ "int_val": 1, "float_val": 1.1, @@ -101,12 +141,15 @@ func TestTemplateConfigValidateTypeForValidConfig(t *testing.T) { "string_val": "abcd", } - err := c.validate() + err = c.validate() assert.NoError(t, err) } func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { - c := testConfig(t) + ctx := context.Background() + c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + require.NoError(t, err) + c.values = map[string]any{ "unknown_prop": 1, "int_val": 1, @@ -115,12 +158,15 @@ func TestTemplateConfigValidateTypeForUnknownField(t *testing.T) { "string_val": "abcd", } - err := c.validate() + err = c.validate() assert.EqualError(t, err, "validation for template input parameters failed. property unknown_prop is not defined in the schema") } func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { - c := testConfig(t) + ctx := context.Background() + c, err := newConfig(ctx, "testdata/config-test-schema/test-schema.json") + require.NoError(t, err) + c.values = map[string]any{ "int_val": "this-should-be-an-int", "float_val": 1.1, @@ -128,7 +174,7 @@ func TestTemplateConfigValidateTypeForInvalidType(t *testing.T) { "string_val": "abcd", } - err := c.validate() + err = c.validate() assert.EqualError(t, err, "validation for template input parameters failed. 
incorrect type for property int_val: expected type integer, but value is \"this-should-be-an-int\"") } @@ -224,19 +270,6 @@ func TestTemplateEnumValidation(t *testing.T) { assert.NoError(t, c.validate()) } -func TestAssignDefaultValuesWithTemplatedDefaults(t *testing.T) { - c := testConfig(t) - ctx := context.Background() - ctx = root.SetWorkspaceClient(ctx, nil) - helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/templated-defaults/template", "./testdata/templated-defaults/library", t.TempDir()) - require.NoError(t, err) - - err = c.assignDefaultValues(r) - assert.NoError(t, err) - assert.Equal(t, "my_file", c.values["string_val"]) -} - func TestTemplateSchemaErrorsWithEmptyDescription(t *testing.T) { _, err := newConfig(context.Background(), "./testdata/config-test-schema/invalid-test-schema.json") assert.EqualError(t, err, "template property property-without-description is missing a description") diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index a8678a525..92133c5fe 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -16,6 +16,7 @@ import ( bundleConfig "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" "github.com/databricks/databricks-sdk-go" @@ -655,15 +656,27 @@ func TestRendererFileTreeRendering(t *testing.T) { func TestRendererSubTemplateInPath(t *testing.T) { ctx := context.Background() ctx = root.SetWorkspaceClient(ctx, nil) - tmpDir := t.TempDir() - helpers := loadHelpers(ctx) - r, err := newRenderer(ctx, nil, helpers, "./testdata/template-in-path/template", "./testdata/template-in-path/library", tmpDir) + // Copy the template directory to a temporary directory where we can safely include a templated file path. + // These paths include characters that are forbidden in Go modules, so we can't use the testdata directory. + // Also see https://github.com/databricks/cli/pull/1671. + templateDir := t.TempDir() + testutil.CopyDirectory(t, "./testdata/template-in-path", templateDir) + + // Use a backtick-quoted string; double quotes are a reserved character for Windows paths: + // https://learn.microsoft.com/en-us/windows/win32/fileio/naming-a-file. 
+ testutil.Touch(t, filepath.Join(templateDir, "template/{{template `dir_name`}}/{{template `file_name`}}")) + + tmpDir := t.TempDir() + r, err := newRenderer(ctx, nil, nil, filepath.Join(templateDir, "template"), filepath.Join(templateDir, "library"), tmpDir) require.NoError(t, err) err = r.walk() require.NoError(t, err) - assert.Equal(t, filepath.Join(tmpDir, "my_directory", "my_file"), r.files[0].DstPath().absPath()) - assert.Equal(t, "my_directory/my_file", r.files[0].DstPath().relPath) + if assert.Len(t, r.files, 2) { + f := r.files[1] + assert.Equal(t, filepath.Join(tmpDir, "my_directory", "my_file"), f.DstPath().absPath()) + assert.Equal(t, "my_directory/my_file", f.DstPath().relPath) + } } diff --git a/libs/template/testdata/config-assign-from-default-value/schema.json b/libs/template/testdata/config-assign-from-default-value/schema.json new file mode 100644 index 000000000..259bb9a7f --- /dev/null +++ b/libs/template/testdata/config-assign-from-default-value/schema.json @@ -0,0 +1,24 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "description": "This is an integer value", + "default": 123 + }, + "float_val": { + "type": "number", + "description": "This is a float value", + "default": 123 + }, + "bool_val": { + "type": "boolean", + "description": "This is a boolean value", + "default": true + }, + "string_val": { + "type": "string", + "description": "This is a string value", + "default": "hello" + } + } +} diff --git a/libs/template/testdata/config-assign-from-file-invalid-int/schema.json b/libs/template/testdata/config-assign-from-file-invalid-int/schema.json new file mode 100644 index 000000000..80c44d6d9 --- /dev/null +++ b/libs/template/testdata/config-assign-from-file-invalid-int/schema.json @@ -0,0 +1,20 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "description": "This is an integer value" + }, + "float_val": { + "type": "number", + "description": "This is a float value" + }, + "bool_val": { + "type": "boolean", + "description": "This is a boolean value" + }, + "string_val": { + "type": "string", + "description": "This is a string value" + } + } +} diff --git a/libs/template/testdata/config-assign-from-file-unknown-property/schema.json b/libs/template/testdata/config-assign-from-file-unknown-property/schema.json new file mode 100644 index 000000000..80c44d6d9 --- /dev/null +++ b/libs/template/testdata/config-assign-from-file-unknown-property/schema.json @@ -0,0 +1,20 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "description": "This is an integer value" + }, + "float_val": { + "type": "number", + "description": "This is a float value" + }, + "bool_val": { + "type": "boolean", + "description": "This is a boolean value" + }, + "string_val": { + "type": "string", + "description": "This is a string value" + } + } +} diff --git a/libs/template/testdata/config-assign-from-file/schema.json b/libs/template/testdata/config-assign-from-file/schema.json new file mode 100644 index 000000000..80c44d6d9 --- /dev/null +++ b/libs/template/testdata/config-assign-from-file/schema.json @@ -0,0 +1,20 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "description": "This is an integer value" + }, + "float_val": { + "type": "number", + "description": "This is a float value" + }, + "bool_val": { + "type": "boolean", + "description": "This is a boolean value" + }, + "string_val": { + "type": "string", + "description": "This is a string value" + } + } +} diff --git 
a/libs/template/testdata/config-assign-from-templated-default-value/schema.json b/libs/template/testdata/config-assign-from-templated-default-value/schema.json new file mode 100644 index 000000000..fe664430b --- /dev/null +++ b/libs/template/testdata/config-assign-from-templated-default-value/schema.json @@ -0,0 +1,24 @@ +{ + "properties": { + "int_val": { + "type": "integer", + "description": "This is an integer value", + "default": 123 + }, + "float_val": { + "type": "number", + "description": "This is a float value", + "default": 123 + }, + "bool_val": { + "type": "boolean", + "description": "This is a boolean value", + "default": true + }, + "string_val": { + "type": "string", + "description": "This is a string value", + "default": "{{ template \"string_val\" }}" + } + } +} diff --git a/libs/template/testdata/config-assign-from-templated-default-value/template/library/my_funcs.tmpl b/libs/template/testdata/config-assign-from-templated-default-value/template/library/my_funcs.tmpl new file mode 100644 index 000000000..41c50d7e5 --- /dev/null +++ b/libs/template/testdata/config-assign-from-templated-default-value/template/library/my_funcs.tmpl @@ -0,0 +1,3 @@ +{{define "string_val" -}} +world +{{- end}} diff --git a/libs/template/testdata/template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} b/libs/template/testdata/config-assign-from-templated-default-value/template/template/.gitkeep similarity index 100% rename from libs/template/testdata/template-in-path/template/{{template `dir_name`}}/{{template `file_name`}} rename to libs/template/testdata/config-assign-from-templated-default-value/template/template/.gitkeep diff --git a/libs/template/testdata/config-test-schema/test-schema.json b/libs/template/testdata/config-test-schema/test-schema.json index 10f8652f4..80c44d6d9 100644 --- a/libs/template/testdata/config-test-schema/test-schema.json +++ b/libs/template/testdata/config-test-schema/test-schema.json @@ -2,8 +2,7 @@ "properties": { "int_val": { "type": "integer", - "description": "This is an integer value", - "default": 123 + "description": "This is an integer value" }, "float_val": { "type": "number", @@ -15,8 +14,7 @@ }, "string_val": { "type": "string", - "description": "This is a string value", - "default": "{{template \"file_name\"}}" + "description": "This is a string value" } } } diff --git a/libs/template/testdata/templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} b/libs/template/testdata/empty/library/.gitkeep similarity index 100% rename from libs/template/testdata/templated-defaults/template/{{template `dir_name`}}/{{template `file_name`}} rename to libs/template/testdata/empty/library/.gitkeep diff --git a/libs/template/testdata/empty/template/.gitkeep b/libs/template/testdata/empty/template/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/testdata/template-in-path/template/.gitkeep b/libs/template/testdata/template-in-path/template/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/testdata/templated-defaults/library/my_funcs.tmpl b/libs/template/testdata/templated-defaults/library/my_funcs.tmpl deleted file mode 100644 index 3415ad774..000000000 --- a/libs/template/testdata/templated-defaults/library/my_funcs.tmpl +++ /dev/null @@ -1,7 +0,0 @@ -{{define "dir_name" -}} -my_directory -{{- end}} - -{{define "file_name" -}} -my_file -{{- end}} diff --git a/main_test.go b/main_test.go index 34ecdca0f..dea82e9b9 100644 --- a/main_test.go +++ b/main_test.go @@ 
-2,11 +2,14 @@ package main import ( "context" + "io/fs" + "path/filepath" "testing" "github.com/databricks/cli/cmd" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" + "golang.org/x/mod/module" ) func TestCommandsDontUseUnderscoreInName(t *testing.T) { @@ -23,3 +26,25 @@ func TestCommandsDontUseUnderscoreInName(t *testing.T) { queue = append(queue[1:], cmd.Commands()...) } } + +func TestFilePath(t *testing.T) { + // To import this repository as a library, all files must match the + // file path constraints made by Go. This test ensures that all files + // in the repository have a valid file path. + // + // See https://github.com/databricks/cli/issues/1629 + // + err := filepath.WalkDir(".", func(path string, _ fs.DirEntry, err error) error { + switch path { + case ".": + return nil + case ".git": + return filepath.SkipDir + } + if assert.NoError(t, err) { + assert.NoError(t, module.CheckFilePath(filepath.ToSlash(path))) + } + return nil + }) + assert.NoError(t, err) +} From 7ae80de351ae57f5dcc826d4ee3df7651dbe1f00 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 13 Aug 2024 18:20:15 +0530 Subject: [PATCH 03/36] Stop tracking file path locations in bundle resources (#1673) ## Changes Since locations are already tracked in the dynamic value tree, we no longer need to track it at the resource/artifact level. This PR: 1. Removes use of `paths.Paths`. Uses dyn.Location instead. 2. Refactors the validation of resources not being empty valued to be generic across all resource types. ## Tests Existing unit tests. --- bundle/artifacts/prepare.go | 5 +- bundle/config/artifact.go | 9 --- bundle/config/mutator/trampoline_test.go | 4 - bundle/config/paths/paths.go | 22 ------ bundle/config/resources.go | 77 ++----------------- bundle/config/resources/job.go | 12 --- bundle/config/resources/mlflow_experiment.go | 12 --- bundle/config/resources/mlflow_model.go | 12 --- .../resources/model_serving_endpoint.go | 14 ---- bundle/config/resources/pipeline.go | 12 --- bundle/config/resources/quality_monitor.go | 14 ---- bundle/config/resources/registered_model.go | 14 ---- bundle/config/root.go | 20 ----- .../validate/all_resources_have_values.go | 47 +++++++++++ bundle/deploy/metadata/compute.go | 3 +- bundle/internal/bundletest/location.go | 2 - bundle/phases/initialize.go | 2 + bundle/python/transform_test.go | 4 - .../environments_job_and_pipeline_test.go | 12 ++- bundle/tests/include_test.go | 9 ++- bundle/tests/job_and_pipeline_test.go | 5 -- bundle/tests/model_serving_endpoint_test.go | 2 - bundle/tests/registered_model_test.go | 2 - bundle/tests/undefined_job_test.go | 12 ++- .../tests/undefined_pipeline/databricks.yml | 8 ++ libs/dyn/convert/to_typed.go | 6 ++ 26 files changed, 98 insertions(+), 243 deletions(-) delete mode 100644 bundle/config/paths/paths.go create mode 100644 bundle/config/validate/all_resources_have_values.go create mode 100644 bundle/tests/undefined_pipeline/databricks.yml diff --git a/bundle/artifacts/prepare.go b/bundle/artifacts/prepare.go index 493e8f7a8..fb61ed9e2 100644 --- a/bundle/artifacts/prepare.go +++ b/bundle/artifacts/prepare.go @@ -34,11 +34,13 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics return diag.Errorf("artifact doesn't exist: %s", m.name) } + l := b.Config.GetLocation("artifacts." 
+ m.name) + dirPath := filepath.Dir(l.File) + // Check if source paths are absolute, if not, make them absolute for k := range artifact.Files { f := &artifact.Files[k] if !filepath.IsAbs(f.Source) { - dirPath := filepath.Dir(artifact.ConfigFilePath) f.Source = filepath.Join(dirPath, f.Source) } } @@ -49,7 +51,6 @@ func (m *prepare) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics } if !filepath.IsAbs(artifact.Path) { - dirPath := filepath.Dir(artifact.ConfigFilePath) artifact.Path = filepath.Join(dirPath, artifact.Path) } diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 219def571..9a5690f57 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -4,18 +4,11 @@ import ( "context" "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/exec" ) type Artifacts map[string]*Artifact -func (artifacts Artifacts) ConfigureConfigFilePath() { - for _, artifact := range artifacts { - artifact.ConfigureConfigFilePath() - } -} - type ArtifactType string const ArtifactPythonWheel ArtifactType = `whl` @@ -40,8 +33,6 @@ type Artifact struct { BuildCommand string `json:"build,omitempty"` Executable exec.ExecutableType `json:"executable,omitempty"` - - paths.Paths } func (a *Artifact) Build(ctx context.Context) ([]byte, error) { diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index e39076647..de395c165 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" @@ -65,9 +64,6 @@ func TestGenerateTrampoline(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test": { - Paths: paths.Paths{ - ConfigFilePath: tmpDir, - }, JobSettings: &jobs.JobSettings{ Tasks: tasks, }, diff --git a/bundle/config/paths/paths.go b/bundle/config/paths/paths.go deleted file mode 100644 index 95977ee37..000000000 --- a/bundle/config/paths/paths.go +++ /dev/null @@ -1,22 +0,0 @@ -package paths - -import ( - "github.com/databricks/cli/libs/dyn" -) - -type Paths struct { - // Absolute path on the local file system to the configuration file that holds - // the definition of this resource. - ConfigFilePath string `json:"-" bundle:"readonly"` - - // DynamicValue stores the [dyn.Value] of the containing struct. - // This assumes that this struct is always embedded. 
- DynamicValue dyn.Value `json:"-"` -} - -func (p *Paths) ConfigureConfigFilePath() { - if !p.DynamicValue.IsValid() { - panic("DynamicValue not set") - } - p.ConfigFilePath = p.DynamicValue.Location().File -} diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 6c7a927f2..22d69ffb5 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -21,81 +21,14 @@ type Resources struct { Schemas map[string]*resources.Schema `json:"schemas,omitempty"` } -type resource struct { - resource ConfigResource - resource_type string - key string -} - -func (r *Resources) allResources() []resource { - all := make([]resource, 0) - for k, e := range r.Jobs { - all = append(all, resource{resource_type: "job", resource: e, key: k}) - } - for k, e := range r.Pipelines { - all = append(all, resource{resource_type: "pipeline", resource: e, key: k}) - } - for k, e := range r.Models { - all = append(all, resource{resource_type: "model", resource: e, key: k}) - } - for k, e := range r.Experiments { - all = append(all, resource{resource_type: "experiment", resource: e, key: k}) - } - for k, e := range r.ModelServingEndpoints { - all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k}) - } - for k, e := range r.RegisteredModels { - all = append(all, resource{resource_type: "registered model", resource: e, key: k}) - } - for k, e := range r.QualityMonitors { - all = append(all, resource{resource_type: "quality monitor", resource: e, key: k}) - } - return all -} - -func (r *Resources) VerifyAllResourcesDefined() error { - all := r.allResources() - for _, e := range all { - err := e.resource.Validate() - if err != nil { - return fmt.Errorf("%s %s is not defined", e.resource_type, e.key) - } - } - - return nil -} - -// ConfigureConfigFilePath sets the specified path for all resources contained in this instance. -// This property is used to correctly resolve paths relative to the path -// of the configuration file they were defined in. -func (r *Resources) ConfigureConfigFilePath() { - for _, e := range r.Jobs { - e.ConfigureConfigFilePath() - } - for _, e := range r.Pipelines { - e.ConfigureConfigFilePath() - } - for _, e := range r.Models { - e.ConfigureConfigFilePath() - } - for _, e := range r.Experiments { - e.ConfigureConfigFilePath() - } - for _, e := range r.ModelServingEndpoints { - e.ConfigureConfigFilePath() - } - for _, e := range r.RegisteredModels { - e.ConfigureConfigFilePath() - } - for _, e := range r.QualityMonitors { - e.ConfigureConfigFilePath() - } -} - type ConfigResource interface { + // Function to assert if the resource exists in the workspace configured in + // the input workspace client. Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) + + // Terraform equivalent name of the resource. For example "databricks_job" + // for jobs and "databricks_pipeline" for pipelines. 
TerraformResourceName() string - Validate() error } func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) { diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index dde5d5663..d8f97a2db 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -2,10 +2,8 @@ package resources import ( "context" - "fmt" "strconv" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -17,8 +15,6 @@ type Job struct { Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` - paths.Paths - *jobs.JobSettings } @@ -48,11 +44,3 @@ func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id stri func (j *Job) TerraformResourceName() string { return "databricks_job" } - -func (j *Job) Validate() error { - if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil { - return fmt.Errorf("job is not defined") - } - - return nil -} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 7854ee7e8..0ab486436 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -16,8 +14,6 @@ type MlflowExperiment struct { Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` - paths.Paths - *ml.Experiment } @@ -43,11 +39,3 @@ func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceCl func (s *MlflowExperiment) TerraformResourceName() string { return "databricks_mlflow_experiment" } - -func (s *MlflowExperiment) Validate() error { - if s == nil || !s.DynamicValue.IsValid() { - return fmt.Errorf("experiment is not defined") - } - - return nil -} diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 40da9f87d..300474e35 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -16,8 +14,6 @@ type MlflowModel struct { Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` - paths.Paths - *ml.Model } @@ -43,11 +39,3 @@ func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, func (s *MlflowModel) TerraformResourceName() string { return "databricks_mlflow_model" } - -func (s *MlflowModel) Validate() error { - if s == nil || !s.DynamicValue.IsValid() { - return fmt.Errorf("model is not defined") - } - - return nil -} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index 503cfbbb7..5efb7ea26 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - 
"github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -20,10 +18,6 @@ type ModelServingEndpoint struct { // as a reference in other resources. This value is returned by terraform. ID string `json:"id,omitempty" bundle:"readonly"` - // Path to config file where the resource is defined. All bundle resources - // include this for interpolation purposes. - paths.Paths - // This is a resource agnostic implementation of permissions for ACLs. // Implementation could be different based on the resource type. Permissions []Permission `json:"permissions,omitempty"` @@ -53,11 +47,3 @@ func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.Workspa func (s *ModelServingEndpoint) TerraformResourceName() string { return "databricks_model_serving" } - -func (s *ModelServingEndpoint) Validate() error { - if s == nil || !s.DynamicValue.IsValid() { - return fmt.Errorf("serving endpoint is not defined") - } - - return nil -} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 7e914b909..55270be65 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -16,8 +14,6 @@ type Pipeline struct { Permissions []Permission `json:"permissions,omitempty"` ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` - paths.Paths - *pipelines.PipelineSpec } @@ -43,11 +39,3 @@ func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id func (p *Pipeline) TerraformResourceName() string { return "databricks_pipeline" } - -func (p *Pipeline) Validate() error { - if p == nil || !p.DynamicValue.IsValid() { - return fmt.Errorf("pipeline is not defined") - } - - return nil -} diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go index 0d13e58fa..9160782cd 100644 --- a/bundle/config/resources/quality_monitor.go +++ b/bundle/config/resources/quality_monitor.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -21,10 +19,6 @@ type QualityMonitor struct { // as a reference in other resources. This value is returned by terraform. ID string `json:"id,omitempty" bundle:"readonly"` - // Path to config file where the resource is defined. All bundle resources - // include this for interpolation purposes. 
- paths.Paths - ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` } @@ -50,11 +44,3 @@ func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClie func (s *QualityMonitor) TerraformResourceName() string { return "databricks_quality_monitor" } - -func (s *QualityMonitor) Validate() error { - if s == nil || !s.DynamicValue.IsValid() { - return fmt.Errorf("quality monitor is not defined") - } - - return nil -} diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index fba643c69..6033ffdf2 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -2,9 +2,7 @@ package resources import ( "context" - "fmt" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" @@ -21,10 +19,6 @@ type RegisteredModel struct { // as a reference in other resources. This value is returned by terraform. ID string `json:"id,omitempty" bundle:"readonly"` - // Path to config file where the resource is defined. All bundle resources - // include this for interpolation purposes. - paths.Paths - // This represents the input args for terraform, and will get converted // to a HCL representation for CRUD *catalog.CreateRegisteredModelRequest @@ -54,11 +48,3 @@ func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceCli func (s *RegisteredModel) TerraformResourceName() string { return "databricks_registered_model" } - -func (s *RegisteredModel) Validate() error { - if s == nil || !s.DynamicValue.IsValid() { - return fmt.Errorf("registered model is not defined") - } - - return nil -} diff --git a/bundle/config/root.go b/bundle/config/root.go index cace22156..2c6fe1a4a 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -136,17 +136,6 @@ func (r *Root) updateWithDynamicValue(nv dyn.Value) error { // Assign the normalized configuration tree. r.value = nv - - // At the moment the check has to be done as part of updateWithDynamicValue - // because otherwise ConfigureConfigFilePath will fail with a panic. - // In the future, we should move this check to a separate mutator in initialise phase. - err = r.Resources.VerifyAllResourcesDefined() - if err != nil { - return err - } - - // Assign config file paths after converting to typed configuration. - r.ConfigureConfigFilePath() return nil } @@ -238,15 +227,6 @@ func (r *Root) MarkMutatorExit(ctx context.Context) error { return nil } -// SetConfigFilePath configures the path that its configuration -// was loaded from in configuration leafs that require it. -func (r *Root) ConfigureConfigFilePath() { - r.Resources.ConfigureConfigFilePath() - if r.Artifacts != nil { - r.Artifacts.ConfigureConfigFilePath() - } -} - // Initializes variables using values passed from the command line flag // Input has to be a string of the form `foo=bar`. 
In this case the variable with // name `foo` is assigned the value `bar` diff --git a/bundle/config/validate/all_resources_have_values.go b/bundle/config/validate/all_resources_have_values.go new file mode 100644 index 000000000..019fe48a2 --- /dev/null +++ b/bundle/config/validate/all_resources_have_values.go @@ -0,0 +1,47 @@ +package validate + +import ( + "context" + "fmt" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +func AllResourcesHaveValues() bundle.Mutator { + return &allResourcesHaveValues{} +} + +type allResourcesHaveValues struct{} + +func (m *allResourcesHaveValues) Name() string { + return "validate:AllResourcesHaveValues" +} + +func (m *allResourcesHaveValues) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + rv := b.Config.Value().Get("resources") + + // Skip if there are no resources block defined, or the resources block is empty. + if rv.Kind() == dyn.KindInvalid || rv.Kind() == dyn.KindNil { + return nil + } + + _, err := dyn.MapByPattern( + rv, + dyn.NewPattern(dyn.AnyKey(), dyn.AnyKey()), + func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if v.Kind() == dyn.KindInvalid || v.Kind() == dyn.KindNil { + // Type of the resource, stripped of the trailing 's' to make it + // singular. + rType := strings.TrimSuffix(p[0].Key(), "s") + + rName := p[1].Key() + return v, fmt.Errorf("%s %s is not defined", rType, rName) + } + return v, nil + }, + ) + return diag.FromErr(err) +} diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index 034765484..6ab997e27 100644 --- a/bundle/deploy/metadata/compute.go +++ b/bundle/deploy/metadata/compute.go @@ -39,7 +39,8 @@ func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { for name, job := range b.Config.Resources.Jobs { // Compute config file path the job is defined in, relative to the bundle // root - relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath) + l := b.Config.GetLocation("resources.jobs." 
+ name) + relativePath, err := filepath.Rel(b.RootPath, l.File) if err != nil { return diag.Errorf("failed to compute relative path for job %s: %v", name, err) } diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go index ebec43d30..380d6e17d 100644 --- a/bundle/internal/bundletest/location.go +++ b/bundle/internal/bundletest/location.go @@ -29,6 +29,4 @@ func SetLocation(b *bundle.Bundle, prefix string, filePath string) { return v, dyn.ErrSkip }) }) - - b.Config.ConfigureConfigFilePath() } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 7b4dc6d41..fac3066dc 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" + "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" @@ -19,6 +20,7 @@ func Initialize() bundle.Mutator { return newPhase( "initialize", []bundle.Mutator{ + validate.AllResourcesHaveValues(), mutator.RewriteSyncPaths(), mutator.MergeJobClusters(), mutator.MergeJobParameters(), diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index c15feb424..c7bddca14 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -7,7 +7,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -124,9 +123,6 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "test": { - Paths: paths.Paths{ - ConfigFilePath: tmpDir, - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { diff --git a/bundle/tests/environments_job_and_pipeline_test.go b/bundle/tests/environments_job_and_pipeline_test.go index a18daf90c..0abeb487c 100644 --- a/bundle/tests/environments_job_and_pipeline_test.go +++ b/bundle/tests/environments_job_and_pipeline_test.go @@ -15,7 +15,8 @@ func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + l := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(l.File)) assert.Equal(t, b.Config.Bundle.Mode, config.Development) assert.True(t, p.Development) require.Len(t, p.Libraries, 1) @@ -29,7 +30,8 @@ func TestJobAndPipelineStagingWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + l := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(l.File)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) @@ -42,14 +44,16 @@ func 
TestJobAndPipelineProductionWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) + pl := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(pl.File)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) assert.Equal(t, "nyc_taxi_production", p.Target) j := b.Config.Resources.Jobs["pipeline_schedule"] - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(j.ConfigFilePath)) + jl := b.Config.GetLocation("resources.jobs.pipeline_schedule") + assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(jl.File)) assert.Equal(t, "Daily refresh of production pipeline", j.Name) require.Len(t, j.Tasks, 1) assert.NotEmpty(t, j.Tasks[0].PipelineTask.PipelineId) diff --git a/bundle/tests/include_test.go b/bundle/tests/include_test.go index 5b0235f60..15f8fcec1 100644 --- a/bundle/tests/include_test.go +++ b/bundle/tests/include_test.go @@ -31,7 +31,8 @@ func TestIncludeWithGlob(t *testing.T) { job := b.Config.Resources.Jobs["my_job"] assert.Equal(t, "1", job.ID) - assert.Equal(t, "include_with_glob/job.yml", filepath.ToSlash(job.ConfigFilePath)) + l := b.Config.GetLocation("resources.jobs.my_job") + assert.Equal(t, "include_with_glob/job.yml", filepath.ToSlash(l.File)) } func TestIncludeDefault(t *testing.T) { @@ -51,9 +52,11 @@ func TestIncludeForMultipleMatches(t *testing.T) { first := b.Config.Resources.Jobs["my_first_job"] assert.Equal(t, "1", first.ID) - assert.Equal(t, "include_multiple/my_first_job/resource.yml", filepath.ToSlash(first.ConfigFilePath)) + fl := b.Config.GetLocation("resources.jobs.my_first_job") + assert.Equal(t, "include_multiple/my_first_job/resource.yml", filepath.ToSlash(fl.File)) second := b.Config.Resources.Jobs["my_second_job"] assert.Equal(t, "2", second.ID) - assert.Equal(t, "include_multiple/my_second_job/resource.yml", filepath.ToSlash(second.ConfigFilePath)) + sl := b.Config.GetLocation("resources.jobs.my_second_job") + assert.Equal(t, "include_multiple/my_second_job/resource.yml", filepath.ToSlash(sl.File)) } diff --git a/bundle/tests/job_and_pipeline_test.go b/bundle/tests/job_and_pipeline_test.go index 5e8febc33..65aa5bdc4 100644 --- a/bundle/tests/job_and_pipeline_test.go +++ b/bundle/tests/job_and_pipeline_test.go @@ -1,7 +1,6 @@ package config_tests import ( - "path/filepath" "testing" "github.com/databricks/cli/bundle/config" @@ -15,7 +14,6 @@ func TestJobAndPipelineDevelopment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) assert.Equal(t, b.Config.Bundle.Mode, config.Development) assert.True(t, p.Development) require.Len(t, p.Libraries, 1) @@ -29,7 +27,6 @@ func TestJobAndPipelineStaging(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) @@ -42,14 +39,12 @@ func TestJobAndPipelineProduction(t *testing.T) { assert.Len(t, 
b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - assert.Equal(t, "job_and_pipeline/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) assert.Equal(t, "nyc_taxi_production", p.Target) j := b.Config.Resources.Jobs["pipeline_schedule"] - assert.Equal(t, "job_and_pipeline/databricks.yml", filepath.ToSlash(j.ConfigFilePath)) assert.Equal(t, "Daily refresh of production pipeline", j.Name) require.Len(t, j.Tasks, 1) assert.NotEmpty(t, j.Tasks[0].PipelineTask.PipelineId) diff --git a/bundle/tests/model_serving_endpoint_test.go b/bundle/tests/model_serving_endpoint_test.go index bfa1a31b4..b8b800863 100644 --- a/bundle/tests/model_serving_endpoint_test.go +++ b/bundle/tests/model_serving_endpoint_test.go @@ -1,7 +1,6 @@ package config_tests import ( - "path/filepath" "testing" "github.com/databricks/cli/bundle/config" @@ -10,7 +9,6 @@ import ( ) func assertExpected(t *testing.T, p *resources.ModelServingEndpoint) { - assert.Equal(t, "model_serving_endpoint/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) assert.Equal(t, "model-name", p.Config.ServedModels[0].ModelName) assert.Equal(t, "1", p.Config.ServedModels[0].ModelVersion) assert.Equal(t, "model-name-1", p.Config.TrafficConfig.Routes[0].ServedModelName) diff --git a/bundle/tests/registered_model_test.go b/bundle/tests/registered_model_test.go index 920a2ac78..008db8bdd 100644 --- a/bundle/tests/registered_model_test.go +++ b/bundle/tests/registered_model_test.go @@ -1,7 +1,6 @@ package config_tests import ( - "path/filepath" "testing" "github.com/databricks/cli/bundle/config" @@ -10,7 +9,6 @@ import ( ) func assertExpectedModel(t *testing.T, p *resources.RegisteredModel) { - assert.Equal(t, "registered_model/databricks.yml", filepath.ToSlash(p.ConfigFilePath)) assert.Equal(t, "main", p.CatalogName) assert.Equal(t, "default", p.SchemaName) assert.Equal(t, "comment", p.Comment) diff --git a/bundle/tests/undefined_job_test.go b/bundle/tests/undefined_job_test.go index ed502c471..4596f2069 100644 --- a/bundle/tests/undefined_job_test.go +++ b/bundle/tests/undefined_job_test.go @@ -1,12 +1,22 @@ package config_tests import ( + "context" "testing" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" "github.com/stretchr/testify/assert" ) func TestUndefinedJobLoadsWithError(t *testing.T) { - _, diags := loadTargetWithDiags("./undefined_job", "default") + b := load(t, "./undefined_job") + diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) assert.ErrorContains(t, diags.Error(), "job undefined is not defined") } + +func TestUndefinedPipelineLoadsWithError(t *testing.T) { + b := load(t, "./undefined_pipeline") + diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) + assert.ErrorContains(t, diags.Error(), "pipeline undefined is not defined") +} diff --git a/bundle/tests/undefined_pipeline/databricks.yml b/bundle/tests/undefined_pipeline/databricks.yml new file mode 100644 index 000000000..a52fda38c --- /dev/null +++ b/bundle/tests/undefined_pipeline/databricks.yml @@ -0,0 +1,8 @@ +bundle: + name: undefined-pipeline + +resources: + pipelines: + undefined: + test: + name: "Test Pipeline" diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 181c88cc9..839d0111a 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -9,6 +9,12 @@ import 
( "github.com/databricks/cli/libs/dyn/dynvar" ) +// Populate a destination typed value from a source dynamic value. +// +// At any point while walking the destination type tree using +// reflection, if this function sees an exported field with type dyn.Value it +// will populate that field with the appropriate source dynamic value. +// see PR: https://github.com/databricks/cli/pull/1010 func ToTyped(dst any, src dyn.Value) error { dstv := reflect.ValueOf(dst) From 48ff18e5fc6c9b5a6d9cebf08b019af1e6f9ada0 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 14 Aug 2024 11:03:44 +0200 Subject: [PATCH 04/36] Upload local libraries even if they don't have artifact defined (#1664) ## Changes Previously for all the libraries referenced in configuration DABs made sure that there is corresponding artifact section. But this is not really necessary and flexible, because local libraries might be built outside of dabs context. It also created difficult to follow logic in code where we back referenced libraries to artifacts which was difficult to fllow This PR does 3 things: 1. Allows all local libraries referenced in DABs config to be uploaded to remote 2. Simplifies upload and glob references expand logic by doing this in single place 3. Speed things up by uploading library only once and doing this in parallel ## Tests Added unit + integration tests + made sure that change is backward compatible (no changes in existing tests) --------- Co-authored-by: Pieter Noordhuis --- bundle/artifacts/artifacts.go | 191 ---------- bundle/artifacts/artifacts_test.go | 196 ----------- bundle/artifacts/autodetect.go | 1 - bundle/artifacts/upload.go | 38 +- bundle/artifacts/upload_test.go | 114 ------ bundle/artifacts/whl/from_libraries.go | 79 ----- bundle/config/mutator/translate_paths_jobs.go | 2 +- bundle/libraries/expand_glob_references.go | 221 ++++++++++++ .../libraries/expand_glob_references_test.go | 239 +++++++++++++ bundle/libraries/libraries.go | 4 +- bundle/libraries/local_path.go | 39 ++- bundle/libraries/local_path_test.go | 28 +- bundle/libraries/match.go | 82 ----- bundle/libraries/match_test.go | 12 +- bundle/libraries/upload.go | 238 +++++++++++++ bundle/libraries/upload_test.go | 331 ++++++++++++++++++ bundle/phases/deploy.go | 4 +- bundle/tests/enviroment_key_test.go | 2 +- .../bundle.yml | 7 - bundle/tests/python_wheel_test.go | 45 +-- internal/bundle/artifacts_test.go | 8 +- 21 files changed, 1103 insertions(+), 778 deletions(-) delete mode 100644 bundle/artifacts/artifacts_test.go delete mode 100644 bundle/artifacts/upload_test.go delete mode 100644 bundle/artifacts/whl/from_libraries.go create mode 100644 bundle/libraries/expand_glob_references.go create mode 100644 bundle/libraries/expand_glob_references_test.go delete mode 100644 bundle/libraries/match.go create mode 100644 bundle/libraries/upload.go create mode 100644 bundle/libraries/upload_test.go diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 3060d08d9..e5e55a14d 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -1,25 +1,16 @@ package artifacts import ( - "bytes" "context" - "errors" "fmt" - "os" - "path" - "path/filepath" - "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" - 
"github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go" ) type mutatorFactory = func(name string) bundle.Mutator @@ -28,8 +19,6 @@ var buildMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactTy config.ArtifactPythonWheel: whl.Build, } -var uploadMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{} - var prepareMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{ config.ArtifactPythonWheel: whl.Prepare, } @@ -43,15 +32,6 @@ func getBuildMutator(t config.ArtifactType, name string) bundle.Mutator { return mutatorFactory(name) } -func getUploadMutator(t config.ArtifactType, name string) bundle.Mutator { - mutatorFactory, ok := uploadMutators[t] - if !ok { - mutatorFactory = BasicUpload - } - - return mutatorFactory(name) -} - func getPrepareMutator(t config.ArtifactType, name string) bundle.Mutator { mutatorFactory, ok := prepareMutators[t] if !ok { @@ -92,174 +72,3 @@ func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti return nil } - -// Basic Upload defines a general upload mutator which uploads artifact as a library to workspace -type basicUpload struct { - name string -} - -func BasicUpload(name string) bundle.Mutator { - return &basicUpload{name: name} -} - -func (m *basicUpload) Name() string { - return fmt.Sprintf("artifacts.Upload(%s)", m.name) -} - -func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - artifact, ok := b.Config.Artifacts[m.name] - if !ok { - return diag.Errorf("artifact doesn't exist: %s", m.name) - } - - if len(artifact.Files) == 0 { - return diag.Errorf("artifact source is not configured: %s", m.name) - } - - uploadPath, err := getUploadBasePath(b) - if err != nil { - return diag.FromErr(err) - } - - client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) - if err != nil { - return diag.FromErr(err) - } - - err = uploadArtifact(ctx, b, artifact, uploadPath, client) - if err != nil { - return diag.Errorf("upload for %s failed, error: %v", m.name, err) - } - - return nil -} - -func getFilerForArtifacts(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) { - if isVolumesPath(uploadPath) { - return filer.NewFilesClient(w, uploadPath) - } - return filer.NewWorkspaceFilesClient(w, uploadPath) -} - -func isVolumesPath(path string) bool { - return strings.HasPrefix(path, "/Volumes/") -} - -func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { - for i := range a.Files { - f := &a.Files[i] - - filename := filepath.Base(f.Source) - cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) - - err := uploadArtifactFile(ctx, f.Source, client) - if err != nil { - return err - } - - log.Infof(ctx, "Upload succeeded") - f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) - remotePath := f.RemotePath - - if !strings.HasPrefix(f.RemotePath, "/Workspace/") && !strings.HasPrefix(f.RemotePath, "/Volumes/") { - wsfsBase := "/Workspace" - remotePath = path.Join(wsfsBase, f.RemotePath) - } - - for _, job := range b.Config.Resources.Jobs { - rewriteArtifactPath(b, f, job, remotePath) - } - } - - return nil -} - -func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) { - // Rewrite artifact path in job task libraries - for i := range job.Tasks { - task := &job.Tasks[i] - for j := range task.Libraries { - lib 
:= &task.Libraries[j] - if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { - lib.Whl = remotePath - } - if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { - lib.Jar = remotePath - } - } - - // Rewrite artifact path in job task libraries for ForEachTask - if task.ForEachTask != nil { - forEachTask := task.ForEachTask - for j := range forEachTask.Task.Libraries { - lib := &forEachTask.Task.Libraries[j] - if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { - lib.Whl = remotePath - } - if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { - lib.Jar = remotePath - } - } - } - } - - // Rewrite artifact path in job environments - for i := range job.Environments { - env := &job.Environments[i] - if env.Spec == nil { - continue - } - - for j := range env.Spec.Dependencies { - lib := env.Spec.Dependencies[j] - if isArtifactMatchLibrary(f, lib, b) { - env.Spec.Dependencies[j] = remotePath - } - } - } -} - -func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool { - if !filepath.IsAbs(libPath) { - libPath = filepath.Join(b.RootPath, libPath) - } - - // libPath can be a glob pattern, so do the match first - matches, err := filepath.Glob(libPath) - if err != nil { - return false - } - - for _, m := range matches { - if m == f.Source { - return true - } - } - - return false -} - -// Function to upload artifact file to Workspace -func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error { - raw, err := os.ReadFile(file) - if err != nil { - return fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err)) - } - - filename := filepath.Base(file) - err = client.Write(ctx, filename, bytes.NewReader(raw), filer.OverwriteIfExists, filer.CreateParentDirectories) - if err != nil { - return fmt.Errorf("unable to import %s: %w", filename, err) - } - - return nil -} - -func getUploadBasePath(b *bundle.Bundle) (string, error) { - artifactPath := b.Config.Workspace.ArtifactPath - if artifactPath == "" { - return "", fmt.Errorf("remote artifact path not configured") - } - - return path.Join(artifactPath, ".internal"), nil -} diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go deleted file mode 100644 index 6d85f3af9..000000000 --- a/bundle/artifacts/artifacts_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package artifacts - -import ( - "context" - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/resources" - mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" - "github.com/databricks/cli/internal/testutil" - "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/stretchr/testify/mock" - "github.com/stretchr/testify/require" -) - -func TestArtifactUploadForWorkspace(t *testing.T) { - tmpDir := t.TempDir() - whlFolder := filepath.Join(tmpDir, "whl") - testutil.Touch(t, whlFolder, "source.whl") - whlLocalPath := filepath.Join(whlFolder, "source.whl") - - b := &bundle.Bundle{ - RootPath: tmpDir, - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: "/foo/bar/artifacts", - }, - Artifacts: config.Artifacts{ - "whl": { - Type: config.ArtifactPythonWheel, - Files: []config.ArtifactFile{ - {Source: whlLocalPath}, - }, - }, - }, - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job": { - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { 
- Libraries: []compute.Library{ - { - Whl: filepath.Join("whl", "*.whl"), - }, - { - Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", - }, - }, - }, - { - ForEachTask: &jobs.ForEachTask{ - Task: jobs.Task{ - Libraries: []compute.Library{ - { - Whl: filepath.Join("whl", "*.whl"), - }, - { - Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", - }, - }, - }, - }, - }, - }, - Environments: []jobs.JobEnvironment{ - { - Spec: &compute.Environment{ - Dependencies: []string{ - filepath.Join("whl", "source.whl"), - "/Workspace/Users/foo@bar.com/mywheel.whl", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - artifact := b.Config.Artifacts["whl"] - mockFiler := mockfiler.NewMockFiler(t) - mockFiler.EXPECT().Write( - mock.Anything, - filepath.Join("source.whl"), - mock.AnythingOfType("*bytes.Reader"), - filer.OverwriteIfExists, - filer.CreateParentDirectories, - ).Return(nil) - - err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler) - require.NoError(t, err) - - // Test that libraries path is updated - require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) - require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) - require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) - require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) - require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) - require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) -} - -func TestArtifactUploadForVolumes(t *testing.T) { - tmpDir := t.TempDir() - whlFolder := filepath.Join(tmpDir, "whl") - testutil.Touch(t, whlFolder, "source.whl") - whlLocalPath := filepath.Join(whlFolder, "source.whl") - - b := &bundle.Bundle{ - RootPath: tmpDir, - Config: config.Root{ - Workspace: config.Workspace{ - ArtifactPath: "/Volumes/foo/bar/artifacts", - }, - Artifacts: config.Artifacts{ - "whl": { - Type: config.ArtifactPythonWheel, - Files: []config.ArtifactFile{ - {Source: whlLocalPath}, - }, - }, - }, - Resources: config.Resources{ - Jobs: map[string]*resources.Job{ - "job": { - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { - Libraries: []compute.Library{ - { - Whl: filepath.Join("whl", "*.whl"), - }, - { - Whl: "/Volumes/some/path/mywheel.whl", - }, - }, - }, - { - ForEachTask: &jobs.ForEachTask{ - Task: jobs.Task{ - Libraries: []compute.Library{ - { - Whl: filepath.Join("whl", "*.whl"), - }, - { - Whl: "/Volumes/some/path/mywheel.whl", - }, - }, - }, - }, - }, - }, - Environments: []jobs.JobEnvironment{ - { - Spec: &compute.Environment{ - Dependencies: []string{ - filepath.Join("whl", "source.whl"), - "/Volumes/some/path/mywheel.whl", - }, - }, - }, - }, - }, - }, - }, - }, - }, - } - - artifact := b.Config.Artifacts["whl"] - mockFiler := mockfiler.NewMockFiler(t) - mockFiler.EXPECT().Write( - mock.Anything, - filepath.Join("source.whl"), - mock.AnythingOfType("*bytes.Reader"), - filer.OverwriteIfExists, - filer.CreateParentDirectories, - ).Return(nil) - - err := uploadArtifact(context.Background(), b, artifact, "/Volumes/foo/bar/artifacts", mockFiler) - require.NoError(t, err) - - // Test that 
libraries path is updated - require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) - require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) - require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) - require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) - require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) - require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) -} diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 0e94edd82..569a480f0 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -29,6 +29,5 @@ func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti return bundle.Apply(ctx, b, bundle.Seq( whl.DetectPackage(), - whl.DefineArtifactsFromLibraries(), )) } diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 3af50021e..58c006dc1 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -2,50 +2,18 @@ package artifacts import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) -func UploadAll() bundle.Mutator { - return &all{ - name: "Upload", - fn: uploadArtifactByName, - } -} - func CleanUp() bundle.Mutator { return &cleanUp{} } -type upload struct { - name string -} - -func uploadArtifactByName(name string) (bundle.Mutator, error) { - return &upload{name}, nil -} - -func (m *upload) Name() string { - return fmt.Sprintf("artifacts.Upload(%s)", m.name) -} - -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - artifact, ok := b.Config.Artifacts[m.name] - if !ok { - return diag.Errorf("artifact doesn't exist: %s", m.name) - } - - if len(artifact.Files) == 0 { - return diag.Errorf("artifact source is not configured: %s", m.name) - } - - return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name)) -} - type cleanUp struct{} func (m *cleanUp) Name() string { @@ -53,12 +21,12 @@ func (m *cleanUp) Name() string { } func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - uploadPath, err := getUploadBasePath(b) + uploadPath, err := libraries.GetUploadBasePath(b) if err != nil { return diag.FromErr(err) } - client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) + client, err := libraries.GetFilerForLibraries(b.WorkspaceClient(), uploadPath) if err != nil { return diag.FromErr(err) } diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go deleted file mode 100644 index 202086bd3..000000000 --- a/bundle/artifacts/upload_test.go +++ /dev/null @@ -1,114 +0,0 @@ -package artifacts - -import ( - "context" - "os" - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/internal/bundletest" - "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/testfile" - "github.com/stretchr/testify/require" -) - -type noop struct{} - -func (n *noop) 
Apply(context.Context, *bundle.Bundle) diag.Diagnostics { - return nil -} - -func (n *noop) Name() string { - return "noop" -} - -func TestExpandGlobFilesSource(t *testing.T) { - rootPath := t.TempDir() - err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) - require.NoError(t, err) - - t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar")) - t1.Close(t) - - t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar")) - t2.Close(t) - - b := &bundle.Bundle{ - RootPath: rootPath, - Config: config.Root{ - Artifacts: map[string]*config.Artifact{ - "test": { - Type: "custom", - Files: []config.ArtifactFile{ - { - Source: filepath.Join("..", "test", "*.jar"), - }, - }, - }, - }, - }, - } - - bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) - - u := &upload{"test"} - uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { - return &noop{} - } - - bm := &build{"test"} - buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { - return &noop{} - } - - pm := &prepare{"test"} - prepareMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { - return &noop{} - } - - diags := bundle.Apply(context.Background(), b, bundle.Seq(pm, bm, u)) - require.NoError(t, diags.Error()) - - require.Equal(t, 2, len(b.Config.Artifacts["test"].Files)) - require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source) - require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source) -} - -func TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { - rootPath := t.TempDir() - err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) - require.NoError(t, err) - - b := &bundle.Bundle{ - RootPath: rootPath, - Config: config.Root{ - Artifacts: map[string]*config.Artifact{ - "test": { - Type: "custom", - Files: []config.ArtifactFile{ - { - Source: filepath.Join("..", "test", "myjar.jar"), - }, - }, - }, - }, - }, - } - - bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) - - u := &upload{"test"} - uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { - return &noop{} - } - - bm := &build{"test"} - buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { - return &noop{} - } - - diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u)) - require.ErrorContains(t, diags.Error(), "no matching files") -} diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go deleted file mode 100644 index 79161a82d..000000000 --- a/bundle/artifacts/whl/from_libraries.go +++ /dev/null @@ -1,79 +0,0 @@ -package whl - -import ( - "context" - "path/filepath" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/log" -) - -type fromLibraries struct{} - -func DefineArtifactsFromLibraries() bundle.Mutator { - return &fromLibraries{} -} - -func (m *fromLibraries) Name() string { - return "artifacts.whl.DefineArtifactsFromLibraries" -} - -func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - if len(b.Config.Artifacts) != 0 { - log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined") - return nil - } - - tasks := libraries.FindTasksWithLocalLibraries(b) - for _, task 
:= range tasks { - // Skip tasks that are not PythonWheelTasks for now, we can later support Jars too - if task.PythonWheelTask == nil { - continue - } - - for _, lib := range task.Libraries { - matchAndAdd(ctx, lib.Whl, b) - } - } - - envs := libraries.FindAllEnvironments(b) - for _, jobEnvs := range envs { - for _, env := range jobEnvs { - if env.Spec != nil { - for _, dep := range env.Spec.Dependencies { - if libraries.IsEnvironmentDependencyLocal(dep) { - matchAndAdd(ctx, dep, b) - } - } - } - } - } - - return nil -} - -func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) { - matches, err := filepath.Glob(filepath.Join(b.RootPath, lib)) - // File referenced from libraries section does not exists, skipping - if err != nil { - return - } - - for _, match := range matches { - name := filepath.Base(match) - if b.Config.Artifacts == nil { - b.Config.Artifacts = make(map[string]*config.Artifact) - } - - log.Debugf(ctx, "Adding an artifact block for %s", match) - b.Config.Artifacts[name] = &config.Artifact{ - Files: []config.ArtifactFile{ - {Source: match}, - }, - Type: config.ArtifactPythonWheel, - } - } -} diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index 60cc8bb9a..6febf4f8f 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -78,7 +78,7 @@ func (t *translateContext) jobRewritePatterns() []jobRewritePattern { ), t.translateNoOpWithPrefix, func(s string) bool { - return !libraries.IsEnvironmentDependencyLocal(s) + return !libraries.IsLibraryLocal(s) }, }, } diff --git a/bundle/libraries/expand_glob_references.go b/bundle/libraries/expand_glob_references.go new file mode 100644 index 000000000..9e90a2a17 --- /dev/null +++ b/bundle/libraries/expand_glob_references.go @@ -0,0 +1,221 @@ +package libraries + +import ( + "context" + "fmt" + "path/filepath" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type expand struct { +} + +func matchError(p dyn.Path, l []dyn.Location, message string) diag.Diagnostic { + return diag.Diagnostic{ + Severity: diag.Error, + Summary: message, + Paths: []dyn.Path{ + p.Append(), + }, + Locations: l, + } +} + +func getLibDetails(v dyn.Value) (string, string, bool) { + m := v.MustMap() + whl, ok := m.GetByString("whl") + if ok { + return whl.MustString(), "whl", true + } + + jar, ok := m.GetByString("jar") + if ok { + return jar.MustString(), "jar", true + } + + return "", "", false +} + +func findMatches(b *bundle.Bundle, path string) ([]string, error) { + matches, err := filepath.Glob(filepath.Join(b.RootPath, path)) + if err != nil { + return nil, err + } + + if len(matches) == 0 { + if isGlobPattern(path) { + return nil, fmt.Errorf("no files match pattern: %s", path) + } else { + return nil, fmt.Errorf("file doesn't exist %s", path) + } + } + + // We make the matched path relative to the root path before storing it + // to allow upload mutator to distinguish between local and remote paths + for i, match := range matches { + matches[i], err = filepath.Rel(b.RootPath, match) + if err != nil { + return nil, err + } + } + + return matches, nil +} + +// Checks if the path is a glob pattern +// It can contain *, [] or ? 
characters +func isGlobPattern(path string) bool { + return strings.ContainsAny(path, "*?[") +} + +func expandLibraries(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value) { + var output []dyn.Value + var diags diag.Diagnostics + + libs := v.MustSequence() + for i, lib := range libs { + lp := p.Append(dyn.Index(i)) + path, libType, supported := getLibDetails(lib) + if !supported || !IsLibraryLocal(path) { + output = append(output, lib) + continue + } + + lp = lp.Append(dyn.Key(libType)) + + matches, err := findMatches(b, path) + if err != nil { + diags = diags.Append(matchError(lp, lib.Locations(), err.Error())) + continue + } + + for _, match := range matches { + output = append(output, dyn.NewValue(map[string]dyn.Value{ + libType: dyn.V(match), + }, lib.Locations())) + } + } + + return diags, output +} + +func expandEnvironmentDeps(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value) { + var output []dyn.Value + var diags diag.Diagnostics + + deps := v.MustSequence() + for i, dep := range deps { + lp := p.Append(dyn.Index(i)) + path := dep.MustString() + if !IsLibraryLocal(path) { + output = append(output, dep) + continue + } + + matches, err := findMatches(b, path) + if err != nil { + diags = diags.Append(matchError(lp, dep.Locations(), err.Error())) + continue + } + + for _, match := range matches { + output = append(output, dyn.NewValue(match, dep.Locations())) + } + } + + return diags, output +} + +type expandPattern struct { + pattern dyn.Pattern + fn func(b *bundle.Bundle, p dyn.Path, v dyn.Value) (diag.Diagnostics, []dyn.Value) +} + +var taskLibrariesPattern = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("tasks"), + dyn.AnyIndex(), + dyn.Key("libraries"), +) + +var forEachTaskLibrariesPattern = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("tasks"), + dyn.AnyIndex(), + dyn.Key("for_each_task"), + dyn.Key("task"), + dyn.Key("libraries"), +) + +var envDepsPattern = dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("environments"), + dyn.AnyIndex(), + dyn.Key("spec"), + dyn.Key("dependencies"), +) + +func (e *expand) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + expanders := []expandPattern{ + { + pattern: taskLibrariesPattern, + fn: expandLibraries, + }, + { + pattern: forEachTaskLibrariesPattern, + fn: expandLibraries, + }, + { + pattern: envDepsPattern, + fn: expandEnvironmentDeps, + }, + } + + var diags diag.Diagnostics + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var err error + for _, expander := range expanders { + v, err = dyn.MapByPattern(v, expander.pattern, func(p dyn.Path, lv dyn.Value) (dyn.Value, error) { + d, output := expander.fn(b, p, lv) + diags = diags.Extend(d) + return dyn.V(output), nil + }) + + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil + }) + + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + + return diags +} + +func (e *expand) Name() string { + return "libraries.ExpandGlobReferences" +} + +// ExpandGlobReferences expands any glob references in the libraries or environments section +// to corresponding local paths. +// We only expand local paths (i.e. paths that are relative to the root path). +// After expanding we make the paths relative to the root path to allow upload mutator later in the chain to +// distinguish between local and remote paths. 
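// Illustrative sketch, not part of the patch: a standalone program showing the
// glob-expansion behaviour implemented above. A local reference such as "whl/*.whl"
// is resolved against the bundle root and then made relative again, mirroring
// findMatches. The root directory and pattern here are illustrative assumptions.
package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

// isGlob mirrors isGlobPattern: any of *, ? or [ marks a glob pattern.
func isGlob(path string) bool {
	return strings.ContainsAny(path, "*?[")
}

func main() {
	root := "/tmp/bundle-root" // assumed bundle root
	pattern := "whl/*.whl"     // assumed local library reference

	matches, err := filepath.Glob(filepath.Join(root, pattern))
	if err != nil {
		panic(err) // only returned for malformed patterns
	}
	for _, m := range matches {
		rel, err := filepath.Rel(root, m)
		if err != nil {
			panic(err)
		}
		// Kept relative to the root so later mutators still treat it as local,
		// e.g. "whl/my1.whl".
		fmt.Println(rel)
	}
	fmt.Println(isGlob(pattern)) // true
}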
+func ExpandGlobReferences() bundle.Mutator { + return &expand{} +} diff --git a/bundle/libraries/expand_glob_references_test.go b/bundle/libraries/expand_glob_references_test.go new file mode 100644 index 000000000..34855b539 --- /dev/null +++ b/bundle/libraries/expand_glob_references_test.go @@ -0,0 +1,239 @@ +package libraries + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestGlobReferencesExpandedForTaskLibraries(t *testing.T) { + dir := t.TempDir() + testutil.Touch(t, dir, "whl", "my1.whl") + testutil.Touch(t, dir, "whl", "my2.whl") + testutil.Touch(t, dir, "jar", "my1.jar") + testutil.Touch(t, dir, "jar", "my2.jar") + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "task", + Libraries: []compute.Library{ + { + Whl: "whl/*.whl", + }, + { + Whl: "/Workspace/path/to/whl/my.whl", + }, + { + Jar: "./jar/*.jar", + }, + { + Egg: "egg/*.egg", + }, + { + Jar: "/Workspace/path/to/jar/*.jar", + }, + { + Whl: "/some/full/path/to/whl/*.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) + require.Empty(t, diags) + + job := b.Config.Resources.Jobs["job"] + task := job.JobSettings.Tasks[0] + require.Equal(t, []compute.Library{ + { + Whl: filepath.Join("whl", "my1.whl"), + }, + { + Whl: filepath.Join("whl", "my2.whl"), + }, + { + Whl: "/Workspace/path/to/whl/my.whl", + }, + { + Jar: filepath.Join("jar", "my1.jar"), + }, + { + Jar: filepath.Join("jar", "my2.jar"), + }, + { + Egg: "egg/*.egg", + }, + { + Jar: "/Workspace/path/to/jar/*.jar", + }, + { + Whl: "/some/full/path/to/whl/*.whl", + }, + }, task.Libraries) +} + +func TestGlobReferencesExpandedForForeachTaskLibraries(t *testing.T) { + dir := t.TempDir() + testutil.Touch(t, dir, "whl", "my1.whl") + testutil.Touch(t, dir, "whl", "my2.whl") + testutil.Touch(t, dir, "jar", "my1.jar") + testutil.Touch(t, dir, "jar", "my2.jar") + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "task", + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: "whl/*.whl", + }, + { + Whl: "/Workspace/path/to/whl/my.whl", + }, + { + Jar: "./jar/*.jar", + }, + { + Egg: "egg/*.egg", + }, + { + Jar: "/Workspace/path/to/jar/*.jar", + }, + { + Whl: "/some/full/path/to/whl/*.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) + require.Empty(t, diags) + + job := b.Config.Resources.Jobs["job"] + task := job.JobSettings.Tasks[0].ForEachTask.Task + require.Equal(t, []compute.Library{ + { + Whl: filepath.Join("whl", "my1.whl"), + }, + { + Whl: filepath.Join("whl", "my2.whl"), + }, + { + Whl: 
"/Workspace/path/to/whl/my.whl", + }, + { + Jar: filepath.Join("jar", "my1.jar"), + }, + { + Jar: filepath.Join("jar", "my2.jar"), + }, + { + Egg: "egg/*.egg", + }, + { + Jar: "/Workspace/path/to/jar/*.jar", + }, + { + Whl: "/some/full/path/to/whl/*.whl", + }, + }, task.Libraries) +} + +func TestGlobReferencesExpandedForEnvironmentsDeps(t *testing.T) { + dir := t.TempDir() + testutil.Touch(t, dir, "whl", "my1.whl") + testutil.Touch(t, dir, "whl", "my2.whl") + testutil.Touch(t, dir, "jar", "my1.jar") + testutil.Touch(t, dir, "jar", "my2.jar") + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "task", + EnvironmentKey: "env", + }, + }, + Environments: []jobs.JobEnvironment{ + { + EnvironmentKey: "env", + Spec: &compute.Environment{ + Dependencies: []string{ + "./whl/*.whl", + "/Workspace/path/to/whl/my.whl", + "./jar/*.jar", + "/some/local/path/to/whl/*.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) + require.Empty(t, diags) + + job := b.Config.Resources.Jobs["job"] + env := job.JobSettings.Environments[0] + require.Equal(t, []string{ + filepath.Join("whl", "my1.whl"), + filepath.Join("whl", "my2.whl"), + "/Workspace/path/to/whl/my.whl", + filepath.Join("jar", "my1.jar"), + filepath.Join("jar", "my2.jar"), + "/some/local/path/to/whl/*.whl", + }, env.Spec.Dependencies) +} diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 72e5bcc66..33b848dd9 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -35,7 +35,7 @@ func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool { } for _, l := range e.Spec.Dependencies { - if IsEnvironmentDependencyLocal(l) { + if IsLibraryLocal(l) { return true } } @@ -67,7 +67,7 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task { func isTaskWithLocalLibraries(task jobs.Task) bool { for _, l := range task.Libraries { - if IsLocalLibrary(&l) { + if IsLibraryLocal(libraryPath(&l)) { return true } } diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index f1e3788f2..5b5ec6c07 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -4,8 +4,6 @@ import ( "net/url" "path" "strings" - - "github.com/databricks/databricks-sdk-go/service/compute" ) // IsLocalPath returns true if the specified path indicates that @@ -38,12 +36,12 @@ func IsLocalPath(p string) bool { return !path.IsAbs(p) } -// IsEnvironmentDependencyLocal returns true if the specified dependency +// IsLibraryLocal returns true if the specified library or environment dependency // should be interpreted as a local path. -// We use this to check if the dependency in environment spec is local. +// We use this to check if the dependency in environment spec is local or that library is local. // We can't use IsLocalPath beacuse environment dependencies can be // a pypi package name which can be misinterpreted as a local path by IsLocalPath. 
-func IsEnvironmentDependencyLocal(dep string) bool { +func IsLibraryLocal(dep string) bool { possiblePrefixes := []string{ ".", } @@ -54,7 +52,22 @@ func IsEnvironmentDependencyLocal(dep string) bool { } } - return false + // If the dependency is a requirements file, it's not a valid local path + if strings.HasPrefix(dep, "-r") { + return false + } + + // If the dependency has no extension, it's a PyPi package name + if isPackage(dep) { + return false + } + + return IsLocalPath(dep) +} + +func isPackage(name string) bool { + // If the dependency has no extension, it's a PyPi package name + return path.Ext(name) == "" } func isRemoteStorageScheme(path string) bool { @@ -67,16 +80,6 @@ func isRemoteStorageScheme(path string) bool { return false } - // If the path starts with scheme:/ format, it's a correct remote storage scheme - return strings.HasPrefix(path, url.Scheme+":/") -} - -// IsLocalLibrary returns true if the specified library refers to a local path. -func IsLocalLibrary(library *compute.Library) bool { - path := libraryPath(library) - if path == "" { - return false - } - - return IsLocalPath(path) + // If the path starts with scheme:/ format (not file), it's a correct remote storage scheme + return strings.HasPrefix(path, url.Scheme+":/") && url.Scheme != "file" } diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index d2492d6b1..be4028d52 100644 --- a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -3,13 +3,13 @@ package libraries import ( "testing" - "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestIsLocalPath(t *testing.T) { // Relative paths, paths with the file scheme, and Windows paths. + assert.True(t, IsLocalPath("some/local/path")) assert.True(t, IsLocalPath("./some/local/path")) assert.True(t, IsLocalPath("file://path/to/package")) assert.True(t, IsLocalPath("C:\\path\\to\\package")) @@ -30,24 +30,13 @@ func TestIsLocalPath(t *testing.T) { assert.False(t, IsLocalPath("abfss://path/to/package")) } -func TestIsLocalLibrary(t *testing.T) { - // Local paths. - assert.True(t, IsLocalLibrary(&compute.Library{Whl: "./file.whl"})) - assert.True(t, IsLocalLibrary(&compute.Library{Jar: "../target/some.jar"})) - - // Non-local paths. - assert.False(t, IsLocalLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"})) - assert.False(t, IsLocalLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"})) - - // Empty. 
- assert.False(t, IsLocalLibrary(&compute.Library{})) -} - -func TestIsEnvironmentDependencyLocal(t *testing.T) { +func TestIsLibraryLocal(t *testing.T) { testCases := [](struct { path string expected bool }){ + {path: "local/*.whl", expected: true}, + {path: "local/test.whl", expected: true}, {path: "./local/*.whl", expected: true}, {path: ".\\local\\*.whl", expected: true}, {path: "./local/mypath.whl", expected: true}, @@ -58,15 +47,16 @@ func TestIsEnvironmentDependencyLocal(t *testing.T) { {path: ".\\..\\local\\*.whl", expected: true}, {path: "../../local/*.whl", expected: true}, {path: "..\\..\\local\\*.whl", expected: true}, + {path: "file://path/to/package/whl.whl", expected: true}, {path: "pypipackage", expected: false}, - {path: "pypipackage/test.whl", expected: false}, - {path: "pypipackage/*.whl", expected: false}, {path: "/Volumes/catalog/schema/volume/path.whl", expected: false}, {path: "/Workspace/my_project/dist.whl", expected: false}, {path: "-r /Workspace/my_project/requirements.txt", expected: false}, + {path: "s3://mybucket/path/to/package", expected: false}, + {path: "dbfs:/mnt/path/to/package", expected: false}, } - for _, tc := range testCases { - require.Equal(t, IsEnvironmentDependencyLocal(tc.path), tc.expected) + for i, tc := range testCases { + require.Equalf(t, tc.expected, IsLibraryLocal(tc.path), "failed case: %d, path: %s", i, tc.path) } } diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go deleted file mode 100644 index 4feb4225d..000000000 --- a/bundle/libraries/match.go +++ /dev/null @@ -1,82 +0,0 @@ -package libraries - -import ( - "context" - "fmt" - "path/filepath" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/diag" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" -) - -type match struct { -} - -func ValidateLocalLibrariesExist() bundle.Mutator { - return &match{} -} - -func (a *match) Name() string { - return "libraries.ValidateLocalLibrariesExist" -} - -func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - for _, job := range b.Config.Resources.Jobs { - err := validateEnvironments(job.Environments, b) - if err != nil { - return diag.FromErr(err) - } - - for _, task := range job.JobSettings.Tasks { - err := validateTaskLibraries(task.Libraries, b) - if err != nil { - return diag.FromErr(err) - } - } - } - - return nil -} - -func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error { - for _, lib := range libs { - path := libraryPath(&lib) - if path == "" || !IsLocalPath(path) { - continue - } - - matches, err := filepath.Glob(filepath.Join(b.RootPath, path)) - if err != nil { - return err - } - - if len(matches) == 0 { - return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(&lib)) - } - } - - return nil -} - -func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error { - for _, env := range envs { - if env.Spec == nil { - continue - } - - for _, dep := range env.Spec.Dependencies { - matches, err := filepath.Glob(filepath.Join(b.RootPath, dep)) - if err != nil { - return err - } - - if len(matches) == 0 && IsEnvironmentDependencyLocal(dep) { - return fmt.Errorf("file %s is referenced in environments section but doesn't exist on the local file system", dep) - } - } - } - - return nil -} diff --git a/bundle/libraries/match_test.go b/bundle/libraries/match_test.go index bb4b15107..e60504c84 100644 --- 
a/bundle/libraries/match_test.go +++ b/bundle/libraries/match_test.go @@ -42,7 +42,7 @@ func TestValidateEnvironments(t *testing.T) { }, } - diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) require.Nil(t, diags) } @@ -74,9 +74,9 @@ func TestValidateEnvironmentsNoFile(t *testing.T) { }, } - diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) require.Len(t, diags, 1) - require.Equal(t, "file ./wheel.whl is referenced in environments section but doesn't exist on the local file system", diags[0].Summary) + require.Equal(t, "file doesn't exist ./wheel.whl", diags[0].Summary) } func TestValidateTaskLibraries(t *testing.T) { @@ -109,7 +109,7 @@ func TestValidateTaskLibraries(t *testing.T) { }, } - diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) require.Nil(t, diags) } @@ -142,7 +142,7 @@ func TestValidateTaskLibrariesNoFile(t *testing.T) { }, } - diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + diags := bundle.Apply(context.Background(), b, ExpandGlobReferences()) require.Len(t, diags, 1) - require.Equal(t, "file ./wheel.whl is referenced in libraries section but doesn't exist on the local file system", diags[0].Summary) + require.Equal(t, "file doesn't exist ./wheel.whl", diags[0].Summary) } diff --git a/bundle/libraries/upload.go b/bundle/libraries/upload.go new file mode 100644 index 000000000..be7cc41db --- /dev/null +++ b/bundle/libraries/upload.go @@ -0,0 +1,238 @@ +package libraries + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "path/filepath" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" + + "github.com/databricks/databricks-sdk-go" + + "golang.org/x/sync/errgroup" +) + +// The Files API backend has a rate limit of 10 concurrent +// requests and 100 QPS. We limit the number of concurrent requests to 5 to +// avoid hitting the rate limit. +var maxFilesRequestsInFlight = 5 + +func Upload() bundle.Mutator { + return &upload{} +} + +func UploadWithClient(client filer.Filer) bundle.Mutator { + return &upload{ + client: client, + } +} + +type upload struct { + client filer.Filer +} + +type configLocation struct { + configPath dyn.Path + location dyn.Location +} + +// Collect all libraries from the bundle configuration and their config paths. +// By this stage all glob references are expanded and we have a list of all libraries that need to be uploaded. +// We collect them from task libraries, foreach task libraries, environment dependencies, and artifacts. +// We return a map of library source to a list of config paths and locations where the library is used. +// We use map so we don't upload the same library multiple times. +// Instead we upload it once and update all the config paths to point to the uploaded location. 
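// Illustrative sketch, not part of the patch: the bounded parallel-upload pattern
// used below. Each distinct library source is uploaded exactly once, and
// errgroup.SetLimit caps the number of in-flight requests (5, matching
// maxFilesRequestsInFlight) to stay within the Files API concurrency limit.
// uploadOnce is a stand-in assumption for the real filer.Write call.
package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

func uploadOnce(ctx context.Context, source string) error {
	// Stand-in for client.Write(...); simulates one upload.
	time.Sleep(10 * time.Millisecond)
	fmt.Println("uploaded", source)
	return nil
}

func main() {
	// Deduplicated library sources, as produced by collectLocalLibraries.
	sources := map[string]bool{
		"dist/a.whl": true,
		"dist/b.whl": true,
		"dist/c.whl": true,
	}

	g, gctx := errgroup.WithContext(context.Background())
	g.SetLimit(5) // at most 5 concurrent uploads

	for source := range sources {
		source := source // capture loop variable (pre-Go 1.22 semantics)
		g.Go(func() error {
			return uploadOnce(gctx, source)
		})
	}

	if err := g.Wait(); err != nil {
		fmt.Println("upload failed:", err)
	}
}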
+func collectLocalLibraries(b *bundle.Bundle) (map[string][]configLocation, error) { + libs := make(map[string]([]configLocation)) + + patterns := []dyn.Pattern{ + taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")), + taskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")), + forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("whl")), + forEachTaskLibrariesPattern.Append(dyn.AnyIndex(), dyn.Key("jar")), + envDepsPattern.Append(dyn.AnyIndex()), + } + + for _, pattern := range patterns { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + source, ok := v.AsString() + if !ok { + return v, fmt.Errorf("expected string, got %s", v.Kind()) + } + + if !IsLibraryLocal(source) { + return v, nil + } + + source = filepath.Join(b.RootPath, source) + libs[source] = append(libs[source], configLocation{ + configPath: p.Append(), // Hack to get the copy of path + location: v.Location(), + }) + + return v, nil + }) + }) + + if err != nil { + return nil, err + } + } + + artifactPattern := dyn.NewPattern( + dyn.Key("artifacts"), + dyn.AnyKey(), + dyn.Key("files"), + dyn.AnyIndex(), + ) + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.MapByPattern(v, artifactPattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + file, ok := v.AsMap() + if !ok { + return v, fmt.Errorf("expected map, got %s", v.Kind()) + } + + sv, ok := file.GetByString("source") + if !ok { + return v, nil + } + + source, ok := sv.AsString() + if !ok { + return v, fmt.Errorf("expected string, got %s", v.Kind()) + } + + libs[source] = append(libs[source], configLocation{ + configPath: p.Append(dyn.Key("remote_path")), + location: v.Location(), + }) + + return v, nil + }) + }) + + if err != nil { + return nil, err + } + + return libs, nil +} + +func (u *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + uploadPath, err := GetUploadBasePath(b) + if err != nil { + return diag.FromErr(err) + } + + // If the client is not initialized, initialize it + // We use client field in mutator to allow for mocking client in testing + if u.client == nil { + filer, err := GetFilerForLibraries(b.WorkspaceClient(), uploadPath) + if err != nil { + return diag.FromErr(err) + } + + u.client = filer + } + + var diags diag.Diagnostics + + libs, err := collectLocalLibraries(b) + if err != nil { + return diag.FromErr(err) + } + + errs, errCtx := errgroup.WithContext(ctx) + errs.SetLimit(maxFilesRequestsInFlight) + + for source := range libs { + errs.Go(func() error { + return UploadFile(errCtx, source, u.client) + }) + } + + if err := errs.Wait(); err != nil { + return diag.FromErr(err) + } + + // Update all the config paths to point to the uploaded location + for source, locations := range libs { + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + remotePath := path.Join(uploadPath, filepath.Base(source)) + + // If the remote path does not start with /Workspace or /Volumes, prepend /Workspace + if !strings.HasPrefix(remotePath, "/Workspace") && !strings.HasPrefix(remotePath, "/Volumes") { + remotePath = "/Workspace" + remotePath + } + for _, location := range locations { + v, err = dyn.SetByPath(v, location.configPath, dyn.NewValue(remotePath, []dyn.Location{location.location})) + if err != nil { + return v, err + } + } + + return v, nil + }) + + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + } + + return diags +} + +func (u *upload) Name() string { + return 
"libraries.Upload" +} + +func GetFilerForLibraries(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) { + if isVolumesPath(uploadPath) { + return filer.NewFilesClient(w, uploadPath) + } + return filer.NewWorkspaceFilesClient(w, uploadPath) +} + +func isVolumesPath(path string) bool { + return strings.HasPrefix(path, "/Volumes/") +} + +// Function to upload file (a library, artifact and etc) to Workspace or UC volume +func UploadFile(ctx context.Context, file string, client filer.Filer) error { + filename := filepath.Base(file) + cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) + + f, err := os.Open(file) + if err != nil { + return fmt.Errorf("unable to open %s: %w", file, errors.Unwrap(err)) + } + defer f.Close() + + err = client.Write(ctx, filename, f, filer.OverwriteIfExists, filer.CreateParentDirectories) + if err != nil { + return fmt.Errorf("unable to import %s: %w", filename, err) + } + + log.Infof(ctx, "Upload succeeded") + return nil +} + +func GetUploadBasePath(b *bundle.Bundle) (string, error) { + artifactPath := b.Config.Workspace.ArtifactPath + if artifactPath == "" { + return "", fmt.Errorf("remote artifact path not configured") + } + + return path.Join(artifactPath, ".internal"), nil +} diff --git a/bundle/libraries/upload_test.go b/bundle/libraries/upload_test.go new file mode 100644 index 000000000..82fe6e7c7 --- /dev/null +++ b/bundle/libraries/upload_test.go @@ -0,0 +1,331 @@ +package libraries + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestArtifactUploadForWorkspace(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "source.whl"), + "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + diags := bundle.Apply(context.Background(), b, 
bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) + require.NoError(t, diags.Error()) + + // Test that libraries path is updated + require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) +} + +func TestArtifactUploadForVolumes(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "source.whl"), + "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) + require.NoError(t, diags.Error()) + + // Test that libraries path is updated + require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Volumes/foo/bar/artifacts/.internal/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) +} + +func 
TestArtifactUploadWithNoLibraryReference(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Workspace/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + }, + } + + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) + require.NoError(t, diags.Error()) + + require.Equal(t, "/Workspace/foo/bar/artifacts/.internal/source.whl", b.Config.Artifacts["whl"].Files[0].RemotePath) +} + +func TestUploadMultipleLibraries(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source1.whl") + testutil.Touch(t, whlFolder, "source2.whl") + testutil.Touch(t, whlFolder, "source3.whl") + testutil.Touch(t, whlFolder, "source4.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/foo/bar/artifacts", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "*.whl"), + "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source1.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil).Once() + + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source2.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil).Once() + + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source3.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil).Once() + + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source4.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil).Once() + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ExpandGlobReferences(), UploadWithClient(mockFiler))) + require.NoError(t, diags.Error()) + + // Test that libraries path is updated + require.Len(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, 5) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source1.whl"}) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source2.whl"}) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: 
"/Workspace/foo/bar/artifacts/.internal/source3.whl"}) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/foo/bar/artifacts/.internal/source4.whl"}) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries, compute.Library{Whl: "/Workspace/Users/foo@bar.com/mywheel.whl"}) + + require.Len(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, 5) + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source1.whl") + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source2.whl") + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source3.whl") + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/foo/bar/artifacts/.internal/source4.whl") + require.Contains(t, b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies, "/Workspace/Users/foo@bar.com/mywheel.whl") +} diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 6929f74ba..ca967c321 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -113,9 +113,9 @@ func Deploy() bundle.Mutator { terraform.StatePull(), deploy.StatePull(), mutator.ValidateGitDetails(), - libraries.ValidateLocalLibrariesExist(), artifacts.CleanUp(), - artifacts.UploadAll(), + libraries.ExpandGlobReferences(), + libraries.Upload(), python.TransformWheelTask(), files.Upload(), deploy.StateUpdate(), diff --git a/bundle/tests/enviroment_key_test.go b/bundle/tests/enviroment_key_test.go index aed3964db..135ef1917 100644 --- a/bundle/tests/enviroment_key_test.go +++ b/bundle/tests/enviroment_key_test.go @@ -18,6 +18,6 @@ func TestEnvironmentKeyProvidedAndNoPanic(t *testing.T) { b, diags := loadTargetWithDiags("./environment_key_only", "default") require.Empty(t, diags) - diags = bundle.Apply(context.Background(), b, libraries.ValidateLocalLibrariesExist()) + diags = bundle.Apply(context.Background(), b, libraries.ExpandGlobReferences()) require.Empty(t, diags) } diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml index 1bac4ebad..492861969 100644 --- a/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml +++ b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml @@ -13,10 +13,3 @@ resources: entry_point: "run" libraries: - whl: ./package/*.whl - - task_key: TestTask2 - existing_cluster_id: "0717-aaaaa-bbbbbb" - python_wheel_task: - package_name: "my_test_code" - entry_point: "run" - libraries: - - whl: ./non-existing/*.whl diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go index 05e4fdfaf..c4d85703c 100644 --- a/bundle/tests/python_wheel_test.go +++ b/bundle/tests/python_wheel_test.go @@ -8,6 +8,9 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/bundle/phases" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -23,7 +26,7 @@ func TestPythonWheelBuild(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - 
match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -40,7 +43,7 @@ func TestPythonWheelBuildAutoDetect(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -57,7 +60,7 @@ func TestPythonWheelBuildAutoDetectWithNotebookTask(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -70,7 +73,7 @@ func TestPythonWheelWithDBFSLib(t *testing.T) { diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -80,21 +83,23 @@ func TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_no_setup") require.NoError(t, err) - diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + b.Config.Workspace.ArtifactPath = "/foo/bar" + + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("my_test_code-0.0.1-py3-none-any.whl"), + mock.AnythingOfType("*os.File"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + u := libraries.UploadWithClient(mockFiler) + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build(), libraries.ExpandGlobReferences(), u)) require.NoError(t, diags.Error()) + require.Empty(t, diags) - match := libraries.ValidateLocalLibrariesExist() - diags = bundle.Apply(ctx, b, match) - require.ErrorContains(t, diags.Error(), "./non-existing/*.whl") - - require.NotZero(t, len(b.Config.Artifacts)) - - artifact := b.Config.Artifacts["my_test_code-0.0.1-py3-none-any.whl"] - require.NotNil(t, artifact) - require.Empty(t, artifact.BuildCommand) - require.Contains(t, artifact.Files[0].Source, filepath.Join(b.RootPath, "package", - "my_test_code-0.0.1-py3-none-any.whl", - )) + require.Equal(t, "/Workspace/foo/bar/.internal/my_test_code-0.0.1-py3-none-any.whl", b.Config.Resources.Jobs["test_job"].JobSettings.Tasks[0].Libraries[0].Whl) } func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) { @@ -109,7 +114,7 @@ func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) { require.NoError(t, err) require.Equal(t, 1, len(matches)) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -126,7 +131,7 @@ func TestPythonWheelBuildMultiple(t *testing.T) { require.NoError(t, err) require.Equal(t, 2, len(matches)) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } @@ -139,7 +144,7 @@ func TestPythonWheelNoBuild(t *testing.T) { diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) require.NoError(t, diags.Error()) - match := libraries.ValidateLocalLibrariesExist() + match := libraries.ExpandGlobReferences() diags = bundle.Apply(ctx, b, match) require.NoError(t, diags.Error()) } diff --git 
a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 46c236a4e..bae8073fc 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -8,9 +8,9 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" "github.com/databricks/databricks-sdk-go/service/compute" @@ -74,7 +74,7 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { }, } - diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + diags := bundle.Apply(ctx, b, bundle.Seq(libraries.ExpandGlobReferences(), libraries.Upload())) require.NoError(t, diags.Error()) // The remote path attribute on the artifact file should have been set. @@ -138,7 +138,7 @@ func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) }, } - diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + diags := bundle.Apply(ctx, b, bundle.Seq(libraries.ExpandGlobReferences(), libraries.Upload())) require.NoError(t, diags.Error()) // The remote path attribute on the artifact file should have been set. @@ -207,7 +207,7 @@ func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { }, } - diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + diags := bundle.Apply(ctx, b, bundle.Seq(libraries.ExpandGlobReferences(), libraries.Upload())) require.NoError(t, diags.Error()) // The remote path attribute on the artifact file should have been set. From 1225fc0c13edde6cd267d7523dd4bfa00621fa82 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 14 Aug 2024 18:31:00 +0530 Subject: [PATCH 05/36] Fix host resolution order in `auth login` (#1370) ## Changes The `auth login` command today prefers a host URL specified in a profile before selecting the one explicitly provided by a user as a command line argument. This PR fixes this bug and refactors the code to make it more linear and easy to read. Note that the same issue exists in the `auth token` command and is fixed here as well. ## Tests Unit tests, and manual testing. --- cmd/auth/auth.go | 27 ++++++------ cmd/auth/login.go | 70 +++++++++++++++++++++----------- cmd/auth/login_test.go | 68 +++++++++++++++++++++++++++++++ cmd/auth/testdata/.databrickscfg | 9 ++++ libs/auth/oauth.go | 1 - 5 files changed, 137 insertions(+), 38 deletions(-) create mode 100644 cmd/auth/testdata/.databrickscfg diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index 79e1063b1..ceceae25c 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -2,6 +2,7 @@ package auth import ( "context" + "fmt" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/cmdio" @@ -34,25 +35,23 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, } func promptForHost(ctx context.Context) (string, error) { - prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Host (e.g. https://.cloud.databricks.com)" - // Validate? - host, err := prompt.Run() - if err != nil { - return "", err + if !cmdio.IsInTTY(ctx) { + return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a host using --host") } - return host, nil + + prompt := cmdio.Prompt(ctx) + prompt.Label = "Databricks host (e.g. 
https://.cloud.databricks.com)" + return prompt.Run() } func promptForAccountID(ctx context.Context) (string, error) { + if !cmdio.IsInTTY(ctx) { + return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify an account ID using --account-id") + } + prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Account ID" + prompt.Label = "Databricks account ID" prompt.Default = "" prompt.AllowEdit = true - // Validate? - accountId, err := prompt.Run() - if err != nil { - return "", err - } - return accountId, nil + return prompt.Run() } diff --git a/cmd/auth/login.go b/cmd/auth/login.go index 11cba8e5f..f87a2a027 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -17,18 +17,16 @@ import ( "github.com/spf13/cobra" ) -func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, args []string, argIndex int) error { - if len(args) > argIndex { - persistentAuth.Host = args[argIndex] - return nil +func promptForProfile(ctx context.Context, defaultValue string) (string, error) { + if !cmdio.IsInTTY(ctx) { + return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a profile using --profile") } - host, err := promptForHost(ctx) - if err != nil { - return err - } - persistentAuth.Host = host - return nil + prompt := cmdio.Prompt(ctx) + prompt.Label = "Databricks profile name" + prompt.Default = defaultValue + prompt.AllowEdit = true + return prompt.Run() } const minimalDbConnectVersion = "13.1" @@ -93,23 +91,18 @@ depends on the existing profiles you have set in your configuration file cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() + profileName := cmd.Flag("profile").Value.String() - var profileName string - profileFlag := cmd.Flag("profile") - if profileFlag != nil && profileFlag.Value.String() != "" { - profileName = profileFlag.Value.String() - } else if cmdio.IsInTTY(ctx) { - prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Profile Name" - prompt.Default = persistentAuth.ProfileName() - prompt.AllowEdit = true - profile, err := prompt.Run() + // If the user has not specified a profile name, prompt for one. + if profileName == "" { + var err error + profileName, err = promptForProfile(ctx, persistentAuth.ProfileName()) if err != nil { return err } - profileName = profile } + // Set the host and account-id based on the provided arguments and flags. err := setHostAndAccountId(ctx, profileName, persistentAuth, args) if err != nil { return err @@ -167,7 +160,23 @@ depends on the existing profiles you have set in your configuration file return cmd } +// Sets the host in the persistentAuth object based on the provided arguments and flags. +// Follows the following precedence: +// 1. [HOST] (first positional argument) or --host flag. Error if both are specified. +// 2. Profile host, if available. +// 3. Prompt the user for the host. +// +// Set the account in the persistentAuth object based on the flags. +// Follows the following precedence: +// 1. --account-id flag. +// 2. account-id from the specified profile, if available. +// 3. Prompt the user for the account-id. func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { + // If both [HOST] and --host are provided, return an error. 
+ if len(args) > 0 && persistentAuth.Host != "" { + return fmt.Errorf("please only provide a host as an argument or a flag, not both") + } + profiler := profile.GetProfiler(ctx) // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. profiles, err := profiler.LoadProfiles(ctx, profile.WithName(profileName)) @@ -177,17 +186,32 @@ func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth } if persistentAuth.Host == "" { - if len(profiles) > 0 && profiles[0].Host != "" { + if len(args) > 0 { + // If [HOST] is provided, set the host to the provided positional argument. + persistentAuth.Host = args[0] + } else if len(profiles) > 0 && profiles[0].Host != "" { + // If neither [HOST] nor --host are provided, and the profile has a host, use it. persistentAuth.Host = profiles[0].Host } else { - configureHost(ctx, persistentAuth, args, 0) + // If neither [HOST] nor --host are provided, and the profile does not have a host, + // then prompt the user for a host. + hostName, err := promptForHost(ctx) + if err != nil { + return err + } + persistentAuth.Host = hostName } } + + // If the account-id was not provided as a cmd line flag, try to read it from + // the specified profile. isAccountClient := (&config.Config{Host: persistentAuth.Host}).IsAccountClient() if isAccountClient && persistentAuth.AccountID == "" { if len(profiles) > 0 && profiles[0].AccountID != "" { persistentAuth.AccountID = profiles[0].AccountID } else { + // Prompt user for the account-id if it we could not get it from a + // profile. accountId, err := promptForAccountID(ctx) if err != nil { return err diff --git a/cmd/auth/login_test.go b/cmd/auth/login_test.go index ce3ca5ae5..d0fa5a16b 100644 --- a/cmd/auth/login_test.go +++ b/cmd/auth/login_test.go @@ -5,8 +5,10 @@ import ( "testing" "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { @@ -15,3 +17,69 @@ func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { err := setHostAndAccountId(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) assert.NoError(t, err) } + +func TestSetHost(t *testing.T) { + var persistentAuth auth.PersistentAuth + t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/.databrickscfg") + ctx, _ := cmdio.SetupTest(context.Background()) + + // Test error when both flag and argument are provided + persistentAuth.Host = "val from --host" + err := setHostAndAccountId(ctx, "profile-1", &persistentAuth, []string{"val from [HOST]"}) + assert.EqualError(t, err, "please only provide a host as an argument or a flag, not both") + + // Test setting host from flag + persistentAuth.Host = "val from --host" + err = setHostAndAccountId(ctx, "profile-1", &persistentAuth, []string{}) + assert.NoError(t, err) + assert.Equal(t, "val from --host", persistentAuth.Host) + + // Test setting host from argument + persistentAuth.Host = "" + err = setHostAndAccountId(ctx, "profile-1", &persistentAuth, []string{"val from [HOST]"}) + assert.NoError(t, err) + assert.Equal(t, "val from [HOST]", persistentAuth.Host) + + // Test setting host from profile + persistentAuth.Host = "" + err = setHostAndAccountId(ctx, "profile-1", &persistentAuth, []string{}) + assert.NoError(t, err) + assert.Equal(t, "https://www.host1.com", persistentAuth.Host) + + // Test setting host from profile + persistentAuth.Host = "" + 
err = setHostAndAccountId(ctx, "profile-2", &persistentAuth, []string{}) + assert.NoError(t, err) + assert.Equal(t, "https://www.host2.com", persistentAuth.Host) + + // Test host is not set. Should prompt. + persistentAuth.Host = "" + err = setHostAndAccountId(ctx, "", &persistentAuth, []string{}) + assert.EqualError(t, err, "the command is being run in a non-interactive environment, please specify a host using --host") +} + +func TestSetAccountId(t *testing.T) { + var persistentAuth auth.PersistentAuth + t.Setenv("DATABRICKS_CONFIG_FILE", "./testdata/.databrickscfg") + ctx, _ := cmdio.SetupTest(context.Background()) + + // Test setting account-id from flag + persistentAuth.AccountID = "val from --account-id" + err := setHostAndAccountId(ctx, "account-profile", &persistentAuth, []string{}) + assert.NoError(t, err) + assert.Equal(t, "https://accounts.cloud.databricks.com", persistentAuth.Host) + assert.Equal(t, "val from --account-id", persistentAuth.AccountID) + + // Test setting account_id from profile + persistentAuth.AccountID = "" + err = setHostAndAccountId(ctx, "account-profile", &persistentAuth, []string{}) + require.NoError(t, err) + assert.Equal(t, "https://accounts.cloud.databricks.com", persistentAuth.Host) + assert.Equal(t, "id-from-profile", persistentAuth.AccountID) + + // Neither flag nor profile account-id is set, should prompt + persistentAuth.AccountID = "" + persistentAuth.Host = "https://accounts.cloud.databricks.com" + err = setHostAndAccountId(ctx, "", &persistentAuth, []string{}) + assert.EqualError(t, err, "the command is being run in a non-interactive environment, please specify an account ID using --account-id") +} diff --git a/cmd/auth/testdata/.databrickscfg b/cmd/auth/testdata/.databrickscfg new file mode 100644 index 000000000..06e55224a --- /dev/null +++ b/cmd/auth/testdata/.databrickscfg @@ -0,0 +1,9 @@ +[profile-1] +host = https://www.host1.com + +[profile-2] +host = https://www.host2.com + +[account-profile] +host = https://accounts.cloud.databricks.com +account_id = id-from-profile diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index 1f3e032de..7c1cb9576 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -105,7 +105,6 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { } func (a *PersistentAuth) ProfileName() string { - // TODO: get profile name from interactive input if a.AccountID != "" { return fmt.Sprintf("ACCOUNT-%s", a.AccountID) } From 53041346f2360d2d07ae9c7abb899e3364100b7c Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Wed, 14 Aug 2024 15:21:40 +0200 Subject: [PATCH 06/36] Update VS Code settings to match latest value from IDE plugin (#1677) ## Changes This updates the `python.envFile` property from VS Code's settings file to use the value that is set by the latest version of the IDE plugin. This change will make it a bit easier for contributors who work on the CLI code base with the plugin enabled. 
--- .vscode/settings.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.vscode/settings.json b/.vscode/settings.json index 869465286..9697e221d 100644 --- a/.vscode/settings.json +++ b/.vscode/settings.json @@ -6,7 +6,7 @@ "files.trimTrailingWhitespace": true, "files.insertFinalNewline": true, "files.trimFinalNewlines": true, - "python.envFile": "${workspaceFolder}/.databricks/.databricks.env", + "python.envFile": "${workspaceRoot}/.env", "databricks.python.envFile": "${workspaceFolder}/.env", "python.analysis.stubPath": ".vscode", "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", From f32902dc0466118f9501e3d85c5774c6ac2c88b4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 14 Aug 2024 15:52:09 +0200 Subject: [PATCH 07/36] Use `service.NamedIdMap` to make lookup generation deterministic (#1678) ## Changes Relies on this PR from Go SDK https://github.com/databricks/databricks-sdk-go/pull/1016 See explanation there --- .codegen/lookup.go.tmpl | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.codegen/lookup.go.tmpl b/.codegen/lookup.go.tmpl index 7e643a90c..431709f90 100644 --- a/.codegen/lookup.go.tmpl +++ b/.codegen/lookup.go.tmpl @@ -116,12 +116,12 @@ func allResolvers() *resolvers { {{range .Services -}} {{- if in $allowlist .KebabName -}} r.{{.Singular.PascalName}} = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - entity, err := w.{{.PascalName}}.GetBy{{range .List.NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name) + entity, err := w.{{.PascalName}}.GetBy{{range .NamedIdMap.NamePath}}{{.PascalName}}{{end}}(ctx, name) if err != nil { return "", err } - return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil + return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .NamedIdMap.IdPath 0).PascalName) }}), nil } {{end -}} {{- end}} From 7aaaee2512a62a46f99820a32b64e3cd888b5b7d Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Wed, 14 Aug 2024 17:59:55 +0200 Subject: [PATCH 08/36] [Internal] Remove dependency to the `openapi` package of the Go SDK (#1676) ## Changes This PR removes the dependency to the `databricks-sdk-go/openapi` package by copying the struct and functions that are needed in a new `schema/spec.go` file. The reason to remove this dependency is that it is being deprecated. Copying the code in the `cli` repo seems reasonable given that it only uses a couple of very small structs. ## Tests Verified that CLI code can be properly generated after this change. 
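For reference, the copied types are small enough to sketch end to end. The snippet below is a minimal, self-contained illustration (not part of this change) of how a spec with this shape can be decoded; it substitutes `json.RawMessage` for the `*jsonschema.Schema` value type used in the real `spec.go` so it compiles without the `libs/jsonschema` package, and the sample JSON document and schema name are made up. ``` package main import ( "encoding/json" "fmt" ) // Shape of the types copied into bundle/schema/spec.go. The real Schemas map // uses *jsonschema.Schema; json.RawMessage is used here only to keep the // sketch self-contained. type Specification struct { Components *Components `json:"components"` } type Components struct { Schemas map[string]json.RawMessage `json:"schemas,omitempty"` } func main() { // Hypothetical OpenAPI fragment with a single component schema. raw := []byte(`{"components": {"schemas": {"jobs.JobSettings": {"type": "object"}}}}`) spec := &Specification{} if err := json.Unmarshal(raw, spec); err != nil { panic(err) } // List the component schemas found in the spec. for name := range spec.Components.Schemas { fmt.Println(name) } } ``` Keeping these two structs local appears sufficient because the schema reader only consumes the `components.schemas` section of the OpenAPI document.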
--- bundle/schema/docs.go | 3 +-- bundle/schema/openapi.go | 3 +-- bundle/schema/openapi_test.go | 19 +++++++++---------- bundle/schema/spec.go | 11 +++++++++++ 4 files changed, 22 insertions(+), 14 deletions(-) create mode 100644 bundle/schema/spec.go diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go index 5b960ea55..6e9289f92 100644 --- a/bundle/schema/docs.go +++ b/bundle/schema/docs.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/jsonschema" - "github.com/databricks/databricks-sdk-go/openapi" ) // A subset of Schema struct @@ -63,7 +62,7 @@ func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) { if err != nil { return nil, err } - spec := &openapi.Specification{} + spec := &Specification{} err = json.Unmarshal(openapiSpec, spec) if err != nil { return nil, err diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index 1756d5165..0d896b87c 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -6,12 +6,11 @@ import ( "strings" "github.com/databricks/cli/libs/jsonschema" - "github.com/databricks/databricks-sdk-go/openapi" ) type OpenapiReader struct { // OpenAPI spec to read schemas from. - OpenapiSpec *openapi.Specification + OpenapiSpec *Specification // In-memory cache of schemas read from the OpenAPI spec. memo map[string]jsonschema.Schema diff --git a/bundle/schema/openapi_test.go b/bundle/schema/openapi_test.go index 359b1e58a..4d393cf37 100644 --- a/bundle/schema/openapi_test.go +++ b/bundle/schema/openapi_test.go @@ -5,7 +5,6 @@ import ( "testing" "github.com/databricks/cli/libs/jsonschema" - "github.com/databricks/databricks-sdk-go/openapi" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,7 +44,7 @@ func TestReadSchemaForObject(t *testing.T) { } } ` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -103,7 +102,7 @@ func TestReadSchemaForArray(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -149,7 +148,7 @@ func TestReadSchemaForMap(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -198,7 +197,7 @@ func TestRootReferenceIsResolved(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -248,7 +247,7 @@ func TestSelfReferenceLoopErrors(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -282,7 +281,7 @@ func TestCrossReferenceLoopErrors(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -327,7 +326,7 @@ func TestReferenceResolutionForMapInObject(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), @@ -397,7 +396,7 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: 
make(map[string]jsonschema.Schema), @@ -460,7 +459,7 @@ func TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) { } } }` - spec := &openapi.Specification{} + spec := &Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, memo: make(map[string]jsonschema.Schema), diff --git a/bundle/schema/spec.go b/bundle/schema/spec.go new file mode 100644 index 000000000..fdc31a4ca --- /dev/null +++ b/bundle/schema/spec.go @@ -0,0 +1,11 @@ +package schema + +import "github.com/databricks/cli/libs/jsonschema" + +type Specification struct { + Components *Components `json:"components"` +} + +type Components struct { + Schemas map[string]*jsonschema.Schema `json:"schemas,omitempty"` +} From 6b3d33a8464722001f29786f6095c54459ec7a6d Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Thu, 15 Aug 2024 14:43:39 +0200 Subject: [PATCH 09/36] Upgrade TF provider to 1.50.0 (#1681) ## Changes See https://github.com/databricks/terraform-provider-databricks/pull/3900 ## Tests * Manually test on a bundle with a pipeline and a schema * Integration tests pass --- bundle/internal/tf/codegen/schema/version.go | 2 +- .../tf/schema/data_source_notebook.go | 15 +- bundle/internal/tf/schema/data_source_user.go | 1 + .../tf/schema/resource_cluster_policy.go | 2 +- .../schema/resource_metastore_data_access.go | 7 + .../tf/schema/resource_model_serving.go | 56 ++++--- .../internal/tf/schema/resource_notebook.go | 1 + .../resource_notification_destination.go | 46 ++++++ .../internal/tf/schema/resource_pipeline.go | 150 ++++++++++++++---- .../tf/schema/resource_storage_credential.go | 7 + bundle/internal/tf/schema/resources.go | 2 + bundle/internal/tf/schema/root.go | 2 +- 12 files changed, 234 insertions(+), 57 deletions(-) create mode 100644 bundle/internal/tf/schema/resource_notification_destination.go diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index 39d4f66c1..efb297243 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.49.1" +const ProviderVersion = "1.50.0" diff --git a/bundle/internal/tf/schema/data_source_notebook.go b/bundle/internal/tf/schema/data_source_notebook.go index ebfbe2dfb..bf97c19a8 100644 --- a/bundle/internal/tf/schema/data_source_notebook.go +++ b/bundle/internal/tf/schema/data_source_notebook.go @@ -3,11 +3,12 @@ package schema type DataSourceNotebook struct { - Content string `json:"content,omitempty"` - Format string `json:"format"` - Id string `json:"id,omitempty"` - Language string `json:"language,omitempty"` - ObjectId int `json:"object_id,omitempty"` - ObjectType string `json:"object_type,omitempty"` - Path string `json:"path"` + Content string `json:"content,omitempty"` + Format string `json:"format"` + Id string `json:"id,omitempty"` + Language string `json:"language,omitempty"` + ObjectId int `json:"object_id,omitempty"` + ObjectType string `json:"object_type,omitempty"` + Path string `json:"path"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_user.go b/bundle/internal/tf/schema/data_source_user.go index 78981f29b..ea20c066e 100644 --- a/bundle/internal/tf/schema/data_source_user.go +++ b/bundle/internal/tf/schema/data_source_user.go @@ -4,6 +4,7 @@ package schema type DataSourceUser struct { AclPrincipalId string `json:"acl_principal_id,omitempty"` + Active bool `json:"active,omitempty"` Alphanumeric string 
`json:"alphanumeric,omitempty"` ApplicationId string `json:"application_id,omitempty"` DisplayName string `json:"display_name,omitempty"` diff --git a/bundle/internal/tf/schema/resource_cluster_policy.go b/bundle/internal/tf/schema/resource_cluster_policy.go index d8111fef2..7e15a7b12 100644 --- a/bundle/internal/tf/schema/resource_cluster_policy.go +++ b/bundle/internal/tf/schema/resource_cluster_policy.go @@ -33,7 +33,7 @@ type ResourceClusterPolicy struct { Description string `json:"description,omitempty"` Id string `json:"id,omitempty"` MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"` - Name string `json:"name"` + Name string `json:"name,omitempty"` PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"` PolicyFamilyId string `json:"policy_family_id,omitempty"` PolicyId string `json:"policy_id,omitempty"` diff --git a/bundle/internal/tf/schema/resource_metastore_data_access.go b/bundle/internal/tf/schema/resource_metastore_data_access.go index 2e2ff4eb4..ef8c34aa7 100644 --- a/bundle/internal/tf/schema/resource_metastore_data_access.go +++ b/bundle/internal/tf/schema/resource_metastore_data_access.go @@ -20,6 +20,12 @@ type ResourceMetastoreDataAccessAzureServicePrincipal struct { DirectoryId string `json:"directory_id"` } +type ResourceMetastoreDataAccessCloudflareApiToken struct { + AccessKeyId string `json:"access_key_id"` + AccountId string `json:"account_id"` + SecretAccessKey string `json:"secret_access_key"` +} + type ResourceMetastoreDataAccessDatabricksGcpServiceAccount struct { CredentialId string `json:"credential_id,omitempty"` Email string `json:"email,omitempty"` @@ -46,6 +52,7 @@ type ResourceMetastoreDataAccess struct { AwsIamRole *ResourceMetastoreDataAccessAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceMetastoreDataAccessAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceMetastoreDataAccessAzureServicePrincipal `json:"azure_service_principal,omitempty"` + CloudflareApiToken *ResourceMetastoreDataAccessCloudflareApiToken `json:"cloudflare_api_token,omitempty"` DatabricksGcpServiceAccount *ResourceMetastoreDataAccessDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` GcpServiceAccountKey *ResourceMetastoreDataAccessGcpServiceAccountKey `json:"gcp_service_account_key,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index f5ffbbe5e..379807a5d 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -10,43 +10,60 @@ type ResourceModelServingConfigAutoCaptureConfig struct { } type ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig struct { - Ai21LabsApiKey string `json:"ai21labs_api_key"` + Ai21LabsApiKey string `json:"ai21labs_api_key,omitempty"` + Ai21LabsApiKeyPlaintext string `json:"ai21labs_api_key_plaintext,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig struct { - AwsAccessKeyId string `json:"aws_access_key_id"` - AwsRegion string `json:"aws_region"` - AwsSecretAccessKey string `json:"aws_secret_access_key"` - BedrockProvider string `json:"bedrock_provider"` + AwsAccessKeyId string `json:"aws_access_key_id,omitempty"` + AwsAccessKeyIdPlaintext string `json:"aws_access_key_id_plaintext,omitempty"` + AwsRegion string `json:"aws_region"` + AwsSecretAccessKey string `json:"aws_secret_access_key,omitempty"` + 
AwsSecretAccessKeyPlaintext string `json:"aws_secret_access_key_plaintext,omitempty"` + BedrockProvider string `json:"bedrock_provider"` } type ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig struct { - AnthropicApiKey string `json:"anthropic_api_key"` + AnthropicApiKey string `json:"anthropic_api_key,omitempty"` + AnthropicApiKeyPlaintext string `json:"anthropic_api_key_plaintext,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelCohereConfig struct { - CohereApiKey string `json:"cohere_api_key"` + CohereApiBase string `json:"cohere_api_base,omitempty"` + CohereApiKey string `json:"cohere_api_key,omitempty"` + CohereApiKeyPlaintext string `json:"cohere_api_key_plaintext,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig struct { - DatabricksApiToken string `json:"databricks_api_token"` - DatabricksWorkspaceUrl string `json:"databricks_workspace_url"` + DatabricksApiToken string `json:"databricks_api_token,omitempty"` + DatabricksApiTokenPlaintext string `json:"databricks_api_token_plaintext,omitempty"` + DatabricksWorkspaceUrl string `json:"databricks_workspace_url"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig struct { + PrivateKey string `json:"private_key,omitempty"` + PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"` + ProjectId string `json:"project_id,omitempty"` + Region string `json:"region,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct { - MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"` - MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"` - MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"` - OpenaiApiBase string `json:"openai_api_base,omitempty"` - OpenaiApiKey string `json:"openai_api_key,omitempty"` - OpenaiApiType string `json:"openai_api_type,omitempty"` - OpenaiApiVersion string `json:"openai_api_version,omitempty"` - OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` - OpenaiOrganization string `json:"openai_organization,omitempty"` + MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"` + MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"` + MicrosoftEntraClientSecretPlaintext string `json:"microsoft_entra_client_secret_plaintext,omitempty"` + MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"` + OpenaiApiBase string `json:"openai_api_base,omitempty"` + OpenaiApiKey string `json:"openai_api_key,omitempty"` + OpenaiApiKeyPlaintext string `json:"openai_api_key_plaintext,omitempty"` + OpenaiApiType string `json:"openai_api_type,omitempty"` + OpenaiApiVersion string `json:"openai_api_version,omitempty"` + OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` + OpenaiOrganization string `json:"openai_organization,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModelPalmConfig struct { - PalmApiKey string `json:"palm_api_key"` + PalmApiKey string `json:"palm_api_key,omitempty"` + PalmApiKeyPlaintext string `json:"palm_api_key_plaintext,omitempty"` } type ResourceModelServingConfigServedEntitiesExternalModel struct { @@ -58,6 +75,7 @@ type ResourceModelServingConfigServedEntitiesExternalModel struct { AnthropicConfig *ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` CohereConfig 
*ResourceModelServingConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` DatabricksModelServingConfig *ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` + GoogleCloudVertexAiConfig *ResourceModelServingConfigServedEntitiesExternalModelGoogleCloudVertexAiConfig `json:"google_cloud_vertex_ai_config,omitempty"` OpenaiConfig *ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` PalmConfig *ResourceModelServingConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_notebook.go b/bundle/internal/tf/schema/resource_notebook.go index 8fb5a5387..4e5d4cbc3 100644 --- a/bundle/internal/tf/schema/resource_notebook.go +++ b/bundle/internal/tf/schema/resource_notebook.go @@ -13,4 +13,5 @@ type ResourceNotebook struct { Path string `json:"path"` Source string `json:"source,omitempty"` Url string `json:"url,omitempty"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_notification_destination.go b/bundle/internal/tf/schema/resource_notification_destination.go new file mode 100644 index 000000000..0ed9cff60 --- /dev/null +++ b/bundle/internal/tf/schema/resource_notification_destination.go @@ -0,0 +1,46 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceNotificationDestinationConfigEmail struct { + Addresses []string `json:"addresses,omitempty"` +} + +type ResourceNotificationDestinationConfigGenericWebhook struct { + Password string `json:"password,omitempty"` + PasswordSet bool `json:"password_set,omitempty"` + Url string `json:"url,omitempty"` + UrlSet bool `json:"url_set,omitempty"` + Username string `json:"username,omitempty"` + UsernameSet bool `json:"username_set,omitempty"` +} + +type ResourceNotificationDestinationConfigMicrosoftTeams struct { + Url string `json:"url,omitempty"` + UrlSet bool `json:"url_set,omitempty"` +} + +type ResourceNotificationDestinationConfigPagerduty struct { + IntegrationKey string `json:"integration_key,omitempty"` + IntegrationKeySet bool `json:"integration_key_set,omitempty"` +} + +type ResourceNotificationDestinationConfigSlack struct { + Url string `json:"url,omitempty"` + UrlSet bool `json:"url_set,omitempty"` +} + +type ResourceNotificationDestinationConfig struct { + Email *ResourceNotificationDestinationConfigEmail `json:"email,omitempty"` + GenericWebhook *ResourceNotificationDestinationConfigGenericWebhook `json:"generic_webhook,omitempty"` + MicrosoftTeams *ResourceNotificationDestinationConfigMicrosoftTeams `json:"microsoft_teams,omitempty"` + Pagerduty *ResourceNotificationDestinationConfigPagerduty `json:"pagerduty,omitempty"` + Slack *ResourceNotificationDestinationConfigSlack `json:"slack,omitempty"` +} + +type ResourceNotificationDestination struct { + DestinationType string `json:"destination_type,omitempty"` + DisplayName string `json:"display_name"` + Id string `json:"id,omitempty"` + Config *ResourceNotificationDestinationConfig `json:"config,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 20c25c1e2..154686463 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -3,15 +3,17 @@ package schema type ResourcePipelineClusterAutoscale struct { - MaxWorkers int 
`json:"max_workers,omitempty"` - MinWorkers int `json:"min_workers,omitempty"` + MaxWorkers int `json:"max_workers"` + MinWorkers int `json:"min_workers"` Mode string `json:"mode,omitempty"` } type ResourcePipelineClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -19,10 +21,16 @@ type ResourcePipelineClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourcePipelineClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourcePipelineClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourcePipelineClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourcePipelineClusterClusterLogConfDbfs struct { @@ -127,8 +135,69 @@ type ResourcePipelineFilters struct { Include []string `json:"include,omitempty"` } +type ResourcePipelineGatewayDefinition struct { + ConnectionId string `json:"connection_id,omitempty"` + GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` + GatewayStorageName string `json:"gateway_storage_name,omitempty"` + GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration struct { + PrimaryKeys []string `json:"primary_keys,omitempty"` + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + ScdType string `json:"scd_type,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsSchema struct { + DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationSchema string `json:"destination_schema,omitempty"` + SourceCatalog string `json:"source_catalog,omitempty"` + SourceSchema string `json:"source_schema,omitempty"` + TableConfiguration *ResourcePipelineIngestionDefinitionObjectsSchemaTableConfiguration `json:"table_configuration,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration struct { + PrimaryKeys []string `json:"primary_keys,omitempty"` + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + ScdType string `json:"scd_type,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjectsTable struct { + DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationSchema string `json:"destination_schema,omitempty"` + DestinationTable string `json:"destination_table,omitempty"` + SourceCatalog string `json:"source_catalog,omitempty"` + SourceSchema string `json:"source_schema,omitempty"` + SourceTable string `json:"source_table,omitempty"` + TableConfiguration *ResourcePipelineIngestionDefinitionObjectsTableTableConfiguration 
`json:"table_configuration,omitempty"` +} + +type ResourcePipelineIngestionDefinitionObjects struct { + Schema *ResourcePipelineIngestionDefinitionObjectsSchema `json:"schema,omitempty"` + Table *ResourcePipelineIngestionDefinitionObjectsTable `json:"table,omitempty"` +} + +type ResourcePipelineIngestionDefinitionTableConfiguration struct { + PrimaryKeys []string `json:"primary_keys,omitempty"` + SalesforceIncludeFormulaFields bool `json:"salesforce_include_formula_fields,omitempty"` + ScdType string `json:"scd_type,omitempty"` +} + +type ResourcePipelineIngestionDefinition struct { + ConnectionName string `json:"connection_name,omitempty"` + IngestionGatewayId string `json:"ingestion_gateway_id,omitempty"` + Objects []ResourcePipelineIngestionDefinitionObjects `json:"objects,omitempty"` + TableConfiguration *ResourcePipelineIngestionDefinitionTableConfiguration `json:"table_configuration,omitempty"` +} + +type ResourcePipelineLatestUpdates struct { + CreationTime string `json:"creation_time,omitempty"` + State string `json:"state,omitempty"` + UpdateId string `json:"update_id,omitempty"` +} + type ResourcePipelineLibraryFile struct { - Path string `json:"path"` + Path string `json:"path,omitempty"` } type ResourcePipelineLibraryMaven struct { @@ -138,7 +207,7 @@ type ResourcePipelineLibraryMaven struct { } type ResourcePipelineLibraryNotebook struct { - Path string `json:"path"` + Path string `json:"path,omitempty"` } type ResourcePipelineLibrary struct { @@ -150,28 +219,53 @@ type ResourcePipelineLibrary struct { } type ResourcePipelineNotification struct { - Alerts []string `json:"alerts"` - EmailRecipients []string `json:"email_recipients"` + Alerts []string `json:"alerts,omitempty"` + EmailRecipients []string `json:"email_recipients,omitempty"` +} + +type ResourcePipelineTriggerCron struct { + QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"` + TimezoneId string `json:"timezone_id,omitempty"` +} + +type ResourcePipelineTriggerManual struct { +} + +type ResourcePipelineTrigger struct { + Cron *ResourcePipelineTriggerCron `json:"cron,omitempty"` + Manual *ResourcePipelineTriggerManual `json:"manual,omitempty"` } type ResourcePipeline struct { - AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` - Catalog string `json:"catalog,omitempty"` - Channel string `json:"channel,omitempty"` - Configuration map[string]string `json:"configuration,omitempty"` - Continuous bool `json:"continuous,omitempty"` - Development bool `json:"development,omitempty"` - Edition string `json:"edition,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name,omitempty"` - Photon bool `json:"photon,omitempty"` - Serverless bool `json:"serverless,omitempty"` - Storage string `json:"storage,omitempty"` - Target string `json:"target,omitempty"` - Url string `json:"url,omitempty"` - Cluster []ResourcePipelineCluster `json:"cluster,omitempty"` - Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"` - Filters *ResourcePipelineFilters `json:"filters,omitempty"` - Library []ResourcePipelineLibrary `json:"library,omitempty"` - Notification []ResourcePipelineNotification `json:"notification,omitempty"` + AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"` + Catalog string `json:"catalog,omitempty"` + Cause string `json:"cause,omitempty"` + Channel string `json:"channel,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + Configuration map[string]string `json:"configuration,omitempty"` + Continuous bool `json:"continuous,omitempty"` + 
CreatorUserName string `json:"creator_user_name,omitempty"` + Development bool `json:"development,omitempty"` + Edition string `json:"edition,omitempty"` + ExpectedLastModified int `json:"expected_last_modified,omitempty"` + Health string `json:"health,omitempty"` + Id string `json:"id,omitempty"` + LastModified int `json:"last_modified,omitempty"` + Name string `json:"name,omitempty"` + Photon bool `json:"photon,omitempty"` + RunAsUserName string `json:"run_as_user_name,omitempty"` + Serverless bool `json:"serverless,omitempty"` + State string `json:"state,omitempty"` + Storage string `json:"storage,omitempty"` + Target string `json:"target,omitempty"` + Url string `json:"url,omitempty"` + Cluster []ResourcePipelineCluster `json:"cluster,omitempty"` + Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"` + Filters *ResourcePipelineFilters `json:"filters,omitempty"` + GatewayDefinition *ResourcePipelineGatewayDefinition `json:"gateway_definition,omitempty"` + IngestionDefinition *ResourcePipelineIngestionDefinition `json:"ingestion_definition,omitempty"` + LatestUpdates []ResourcePipelineLatestUpdates `json:"latest_updates,omitempty"` + Library []ResourcePipelineLibrary `json:"library,omitempty"` + Notification []ResourcePipelineNotification `json:"notification,omitempty"` + Trigger *ResourcePipelineTrigger `json:"trigger,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index 1c62cf8df..7278c2193 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -20,6 +20,12 @@ type ResourceStorageCredentialAzureServicePrincipal struct { DirectoryId string `json:"directory_id"` } +type ResourceStorageCredentialCloudflareApiToken struct { + AccessKeyId string `json:"access_key_id"` + AccountId string `json:"account_id"` + SecretAccessKey string `json:"secret_access_key"` +} + type ResourceStorageCredentialDatabricksGcpServiceAccount struct { CredentialId string `json:"credential_id,omitempty"` Email string `json:"email,omitempty"` @@ -46,6 +52,7 @@ type ResourceStorageCredential struct { AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` + CloudflareApiToken *ResourceStorageCredentialCloudflareApiToken `json:"cloudflare_api_token,omitempty"` DatabricksGcpServiceAccount *ResourceStorageCredentialDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` GcpServiceAccountKey *ResourceStorageCredentialGcpServiceAccountKey `json:"gcp_service_account_key,omitempty"` } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 79c1b32b5..737b77a2a 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -59,6 +59,7 @@ type Resources struct { MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` Notebook map[string]any `json:"databricks_notebook,omitempty"` + NotificationDestination map[string]any `json:"databricks_notification_destination,omitempty"` OboToken map[string]any `json:"databricks_obo_token,omitempty"` OnlineTable map[string]any 
`json:"databricks_online_table,omitempty"` PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` @@ -160,6 +161,7 @@ func NewResources() *Resources { MwsVpcEndpoint: make(map[string]any), MwsWorkspaces: make(map[string]any), Notebook: make(map[string]any), + NotificationDestination: make(map[string]any), OboToken: make(map[string]any), OnlineTable: make(map[string]any), PermissionAssignment: make(map[string]any), diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 171128350..ebdb7f095 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.49.1" +const ProviderVersion = "1.50.0" func NewRoot() *Root { return &Root{ From a6eb673d55ad7cd6163050b6c3cc845c67ac52a5 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 15 Aug 2024 18:23:02 +0530 Subject: [PATCH 10/36] Print text logs in `import-dir` and `export-dir` commands (#1682) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes In https://github.com/databricks/cli/pull/1202 the semantics of `cmdio.RenderJson` were changed to always render the JSON object. Before, we would only render it if `--output json` was specified. This PR fixes the logs to print human-readable log lines instead of a JSON object. This PR also removes the now unused `cmdio.Render` method. ## Tests Manually: ``` ➜ bundle-playground git:(master) ✗ cli workspace import-dir ./tmp /Users/shreyas.goenka@databricks.com/test-import-1 -p aws-prod-ucws Importing files from ./tmp a -> /Users/shreyas.goenka@databricks.com/test-import-1/a Import complete. The files are available at /Users/shreyas.goenka@databricks.com/test-import-1 ``` ``` ➜ bundle-playground git:(master) ✗ cli workspace export-dir /Users/shreyas.goenka@databricks.com/test-export-1 ./tmp-2 -p aws-prod-ucws Exporting files from /Users/shreyas.goenka@databricks.com/test-export-1 /Users/shreyas.goenka@databricks.com/test-export-1/b -> tmp-2/b Exported complete. 
The files are available at ./tmp-2 ``` --- cmd/workspace/workspace/export_dir.go | 5 +-- cmd/workspace/workspace/import_dir.go | 5 +-- internal/workspace_test.go | 53 ++++++++++++++++++--------- libs/cmdio/render.go | 8 ---- 4 files changed, 39 insertions(+), 32 deletions(-) diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index 0b53666f9..0046f46ef 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -110,8 +110,7 @@ func newExportDir() *cobra.Command { } workspaceFS := filer.NewFS(ctx, workspaceFiler) - // TODO: print progress events on stderr instead: https://github.com/databricks/cli/issues/448 - err = cmdio.RenderJson(ctx, newExportStartedEvent(opts.sourceDir)) + err = cmdio.RenderWithTemplate(ctx, newExportStartedEvent(opts.sourceDir), "", "Exporting files from {{.SourcePath}}\n") if err != nil { return err } @@ -120,7 +119,7 @@ func newExportDir() *cobra.Command { if err != nil { return err } - return cmdio.RenderJson(ctx, newExportCompletedEvent(opts.targetDir)) + return cmdio.RenderWithTemplate(ctx, newExportCompletedEvent(opts.targetDir), "", "Export complete\n") } return cmd diff --git a/cmd/workspace/workspace/import_dir.go b/cmd/workspace/workspace/import_dir.go index 19d9a0a17..a197d7dd9 100644 --- a/cmd/workspace/workspace/import_dir.go +++ b/cmd/workspace/workspace/import_dir.go @@ -134,8 +134,7 @@ Notebooks will have their extensions (one of .scala, .py, .sql, .ipynb, .r) stri return err } - // TODO: print progress events on stderr instead: https://github.com/databricks/cli/issues/448 - err = cmdio.RenderJson(ctx, newImportStartedEvent(opts.sourceDir)) + err = cmdio.RenderWithTemplate(ctx, newImportStartedEvent(opts.sourceDir), "", "Importing files from {{.SourcePath}}\n") if err != nil { return err } @@ -145,7 +144,7 @@ Notebooks will have their extensions (one of .scala, .py, .sql, .ipynb, .r) stri if err != nil { return err } - return cmdio.RenderJson(ctx, newImportCompletedEvent(opts.targetDir)) + return cmdio.RenderWithTemplate(ctx, newImportCompletedEvent(opts.targetDir), "", "Import complete\n") } return cmd diff --git a/internal/workspace_test.go b/internal/workspace_test.go index bc354914f..445361654 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -3,18 +3,17 @@ package internal import ( "context" "encoding/base64" - "errors" + "fmt" "io" - "net/http" "os" "path" "path/filepath" "strings" "testing" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -63,21 +62,12 @@ func TestAccWorkpaceExportPrintsContents(t *testing.T) { } func setupWorkspaceImportExportTest(t *testing.T) (context.Context, filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + ctx, wt := acc.WorkspaceTest(t) - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesClient(w, tmpdir) + tmpdir := TemporaryWorkspaceDir(t, wt.W) + f, err := filer.NewWorkspaceFilesClient(wt.W, tmpdir) require.NoError(t, err) - // Check if we can use this API here, skip test if we cannot. 
- _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") - var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { - t.Skip(aerr.Message) - } - return ctx, f, tmpdir } @@ -122,8 +112,21 @@ func TestAccExportDir(t *testing.T) { err = f.Write(ctx, "a/b/c/file-b", strings.NewReader("def"), filer.CreateParentDirectories) require.NoError(t, err) + expectedLogs := strings.Join([]string{ + fmt.Sprintf("Exporting files from %s", sourceDir), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "a/b/c/file-b"), filepath.Join(targetDir, "a/b/c/file-b")), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "file-a"), filepath.Join(targetDir, "file-a")), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "pyNotebook"), filepath.Join(targetDir, "pyNotebook.py")), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "rNotebook"), filepath.Join(targetDir, "rNotebook.r")), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "scalaNotebook"), filepath.Join(targetDir, "scalaNotebook.scala")), + fmt.Sprintf("%s -> %s", path.Join(sourceDir, "sqlNotebook"), filepath.Join(targetDir, "sqlNotebook.sql")), + "Export complete\n", + }, "\n") + // Run Export - RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + stdout, stderr := RequireSuccessfulRun(t, "workspace", "export-dir", sourceDir, targetDir) + assert.Equal(t, expectedLogs, stdout.String()) + assert.Equal(t, "", stderr.String()) // Assert files were exported assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "abc") @@ -176,10 +179,24 @@ func TestAccExportDirWithOverwriteFlag(t *testing.T) { assertLocalFileContents(t, filepath.Join(targetDir, "file-a"), "content from workspace") } -// TODO: Add assertions on progress logs for workspace import-dir command. 
https://github.com/databricks/cli/issues/455 func TestAccImportDir(t *testing.T) { ctx, workspaceFiler, targetDir := setupWorkspaceImportExportTest(t) - RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + stdout, stderr := RequireSuccessfulRun(t, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug") + + expectedLogs := strings.Join([]string{ + fmt.Sprintf("Importing files from %s", "./testdata/import_dir"), + fmt.Sprintf("%s -> %s", filepath.FromSlash("a/b/c/file-b"), path.Join(targetDir, "a/b/c/file-b")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("file-a"), path.Join(targetDir, "file-a")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("jupyterNotebook.ipynb"), path.Join(targetDir, "jupyterNotebook")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("pyNotebook.py"), path.Join(targetDir, "pyNotebook")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("rNotebook.r"), path.Join(targetDir, "rNotebook")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("scalaNotebook.scala"), path.Join(targetDir, "scalaNotebook")), + fmt.Sprintf("%s -> %s", filepath.FromSlash("sqlNotebook.sql"), path.Join(targetDir, "sqlNotebook")), + "Import complete\n", + }, "\n") + + assert.Equal(t, expectedLogs, stdout.String()) + assert.Equal(t, "", stderr.String()) // Assert files are imported assertFilerFileContents(t, ctx, workspaceFiler, "file-a", "hello, world") diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index ec851b8ff..4114db5ca 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -280,14 +280,6 @@ func RenderIteratorWithTemplate[T any](ctx context.Context, i listing.Iterator[T return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, headerTemplate, template) } -func RenderJson(ctx context.Context, v any) error { - c := fromContext(ctx) - if _, ok := v.(listingInterface); ok { - panic("use RenderIteratorJson instead") - } - return renderWithTemplate(newRenderer(v), ctx, flags.OutputJSON, c.out, c.headerTemplate, c.template) -} - func RenderIteratorJson[T any](ctx context.Context, i listing.Iterator[T]) error { c := fromContext(ctx) return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) From 54799a1918e4eca026090626539928d3d886736e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 15 Aug 2024 15:23:07 +0200 Subject: [PATCH 11/36] Upgrade Go SDK to 0.44.0 (#1679) ## Changes Upgrade Go SDK to 0.44.0 --------- Co-authored-by: Pieter Noordhuis --- .codegen/_openapi_sha | 2 +- .gitattributes | 6 + bundle/config/variable/lookup.go | 4 +- bundle/run/pipeline.go | 4 +- bundle/run/progress/pipeline.go | 6 +- bundle/schema/docs/bundle_descriptions.json | 266 ++++++++-- cmd/account/budgets/budgets.go | 125 ++--- cmd/account/cmd.go | 4 +- .../custom-app-integration.go | 55 +- .../o-auth-published-apps.go | 2 +- .../published-app-integration.go | 43 +- .../usage-dashboards/usage-dashboards.go | 164 ++++++ .../workspace-assignment.go | 8 +- cmd/cmd.go | 2 + cmd/labs/project/installer_test.go | 11 +- cmd/root/auth_test.go | 8 + cmd/workspace/alerts-legacy/alerts-legacy.go | 388 ++++++++++++++ cmd/workspace/alerts/alerts.go | 144 +++-- cmd/workspace/apps/apps.go | 325 ++++++++++-- .../cluster-policies/cluster-policies.go | 60 +-- cmd/workspace/clusters/clusters.go | 142 ++++- cmd/workspace/cmd.go | 10 + .../consumer-fulfillments.go | 3 - .../consumer-installations.go | 3 - .../consumer-listings/consumer-listings.go | 7 - 
.../consumer-personalization-requests.go | 3 - .../consumer-providers/consumer-providers.go | 3 - cmd/workspace/data-sources/data-sources.go | 12 +- cmd/workspace/genie/genie.go | 437 +++++++++++++++ cmd/workspace/groups.go | 4 + cmd/workspace/jobs/jobs.go | 1 + cmd/workspace/lakeview/lakeview.go | 2 +- .../model-versions/model-versions.go | 3 + .../notification-destinations.go | 342 ++++++++++++ .../permission-migration.go | 16 +- cmd/workspace/permissions/permissions.go | 30 +- .../policy-families/policy-families.go | 13 +- .../provider-exchange-filters.go | 3 - .../provider-exchanges/provider-exchanges.go | 3 - .../provider-files/provider-files.go | 3 - .../provider-listings/provider-listings.go | 3 - .../provider-personalization-requests.go | 3 - .../provider-provider-analytics-dashboards.go | 3 - .../provider-providers/provider-providers.go | 3 - cmd/workspace/providers/providers.go | 5 + .../queries-legacy/queries-legacy.go | 500 ++++++++++++++++++ cmd/workspace/queries/queries.go | 227 ++++---- cmd/workspace/query-history/query-history.go | 23 +- .../query-visualizations-legacy.go | 253 +++++++++ .../query-visualizations.go | 72 ++- cmd/workspace/recipients/recipients.go | 7 + .../registered-models/registered-models.go | 1 + cmd/workspace/schemas/schemas.go | 2 + cmd/workspace/shares/shares.go | 23 +- .../system-schemas/system-schemas.go | 3 + .../workspace-bindings/workspace-bindings.go | 29 +- go.mod | 8 +- go.sum | 16 +- libs/databrickscfg/cfgpickers/clusters.go | 4 +- .../databrickscfg/cfgpickers/clusters_test.go | 8 +- 60 files changed, 3251 insertions(+), 609 deletions(-) create mode 100755 cmd/account/usage-dashboards/usage-dashboards.go create mode 100755 cmd/workspace/alerts-legacy/alerts-legacy.go create mode 100755 cmd/workspace/genie/genie.go create mode 100755 cmd/workspace/notification-destinations/notification-destinations.go create mode 100755 cmd/workspace/queries-legacy/queries-legacy.go create mode 100755 cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index c4b47ca14..fef6f268b 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7437dabb9dadee402c1fc060df4c1ce8cc5369f0 \ No newline at end of file +f98c07f9c71f579de65d2587bb0292f83d10e55d \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index c11257e9e..bdb3f3982 100755 --- a/.gitattributes +++ b/.gitattributes @@ -24,10 +24,12 @@ cmd/account/service-principals/service-principals.go linguist-generated=true cmd/account/settings/settings.go linguist-generated=true cmd/account/storage-credentials/storage-credentials.go linguist-generated=true cmd/account/storage/storage.go linguist-generated=true +cmd/account/usage-dashboards/usage-dashboards.go linguist-generated=true cmd/account/users/users.go linguist-generated=true cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true cmd/account/workspaces/workspaces.go linguist-generated=true +cmd/workspace/alerts-legacy/alerts-legacy.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true @@ -54,6 +56,7 @@ cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go lingu cmd/workspace/experiments/experiments.go linguist-generated=true 
cmd/workspace/external-locations/external-locations.go linguist-generated=true cmd/workspace/functions/functions.go linguist-generated=true +cmd/workspace/genie/genie.go linguist-generated=true cmd/workspace/git-credentials/git-credentials.go linguist-generated=true cmd/workspace/global-init-scripts/global-init-scripts.go linguist-generated=true cmd/workspace/grants/grants.go linguist-generated=true @@ -67,6 +70,7 @@ cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true cmd/workspace/model-registry/model-registry.go linguist-generated=true cmd/workspace/model-versions/model-versions.go linguist-generated=true +cmd/workspace/notification-destinations/notification-destinations.go linguist-generated=true cmd/workspace/online-tables/online-tables.go linguist-generated=true cmd/workspace/permission-migration/permission-migration.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true @@ -81,8 +85,10 @@ cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true +cmd/workspace/queries-legacy/queries-legacy.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true +cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go linguist-generated=true cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true cmd/workspace/recipients/recipients.go linguist-generated=true diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index 56d2ca810..9c85e2a71 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -220,7 +220,7 @@ type resolvers struct { func allResolvers() *resolvers { r := &resolvers{} r.Alert = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - entity, err := w.Alerts.GetByName(ctx, name) + entity, err := w.Alerts.GetByDisplayName(ctx, name) if err != nil { return "", err } @@ -284,7 +284,7 @@ func allResolvers() *resolvers { return fmt.Sprint(entity.PipelineId), nil } r.Query = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { - entity, err := w.Queries.GetByName(ctx, name) + entity, err := w.Queries.GetByDisplayName(ctx, name) if err != nil { return "", err } diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 4e29b9f3f..d684f8388 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -53,7 +53,7 @@ func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, u // Otherwise for long lived pipelines, there can be a lot of unnecessary // latency due to multiple pagination API calls needed underneath the hood for // ListPipelineEventsAll - res, err := w.Pipelines.Impl().ListPipelineEvents(ctx, pipelines.ListPipelineEventsRequest{ + events, err := w.Pipelines.ListPipelineEventsAll(ctx, pipelines.ListPipelineEventsRequest{ Filter: `level='ERROR'`, MaxResults: 100, PipelineId: pipelineId, @@ -61,7 +61,7 @@ func (r *pipelineRunner) logErrorEvent(ctx context.Context, pipelineId string, u if err != nil { return err } - updateEvents := filterEventsByUpdateId(res.Events, 
updateId) + updateEvents := filterEventsByUpdateId(events, updateId) // The events API returns most recent events first. We iterate in a reverse order // to print the events chronologically for i := len(updateEvents) - 1; i >= 0; i-- { diff --git a/bundle/run/progress/pipeline.go b/bundle/run/progress/pipeline.go index fb076f680..4a256e76c 100644 --- a/bundle/run/progress/pipeline.go +++ b/bundle/run/progress/pipeline.go @@ -78,7 +78,7 @@ func (l *UpdateTracker) Events(ctx context.Context) ([]ProgressEvent, error) { } // we only check the most recent 100 events for progress - response, err := l.w.Pipelines.Impl().ListPipelineEvents(ctx, pipelines.ListPipelineEventsRequest{ + events, err := l.w.Pipelines.ListPipelineEventsAll(ctx, pipelines.ListPipelineEventsRequest{ PipelineId: l.PipelineId, MaxResults: 100, Filter: filter, @@ -89,8 +89,8 @@ func (l *UpdateTracker) Events(ctx context.Context) ([]ProgressEvent, error) { result := make([]ProgressEvent, 0) // we iterate in reverse to return events in chronological order - for i := len(response.Events) - 1; i >= 0; i-- { - event := response.Events[i] + for i := len(events) - 1; i >= 0; i-- { + event := events[i] // filter to only include update_progress and flow_progress events if event.EventType == "flow_progress" || event.EventType == "update_progress" { result = append(result, ProgressEvent(event)) diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 380be0545..d888b3663 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -218,7 +218,7 @@ } }, "description": { - "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." + "description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding." }, "edit_mode": { "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." @@ -935,7 +935,7 @@ } }, "egg": { - "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above." }, "jar": { "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." @@ -1827,13 +1827,16 @@ } }, "external_model": { - "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. 
When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", + "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,\nit cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.\nThe task type of all external models within an endpoint must be the same.\n", "properties": { "ai21labs_config": { "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", "properties": { "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." + "description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`." + }, + "ai21labs_api_key_plaintext": { + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`." } } }, @@ -1841,13 +1844,19 @@ "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", "properties": { "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." + "description": "The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`." + }, + "aws_access_key_id_plaintext": { + "description": "An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`." }, "aws_region": { "description": "The AWS region to use. Bedrock has to be enabled there." }, "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." + "description": "The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`." 
+ }, + "aws_secret_access_key_plaintext": { + "description": "An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`." }, "bedrock_provider": { "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." @@ -1858,15 +1867,24 @@ "description": "Anthropic Config. Only required if the provider is 'anthropic'.", "properties": { "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." + "description": "The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`." + }, + "anthropic_api_key_plaintext": { + "description": "The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`." } } }, "cohere_config": { "description": "Cohere Config. Only required if the provider is 'cohere'.", "properties": { + "cohere_api_base": { + "description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n" + }, "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." + "description": "The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`." + }, + "cohere_api_key_plaintext": { + "description": "The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`." } } }, @@ -1874,13 +1892,33 @@ "description": "Databricks Model Serving Config. 
Only required if the provider is 'databricks-model-serving'.", "properties": { "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\nIf you prefer to paste your API key directly, see `databricks_api_token_plaintext`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n" + }, + "databricks_api_token_plaintext": { + "description": "The Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n" }, "databricks_workspace_url": { "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, + "google_cloud_vertex_ai_config": { + "description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.", + "properties": { + "private_key": { + "description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`" + }, + "private_key_plaintext": { + "description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`." + }, + "project_id": { + "description": "This is the Google Cloud project id that the service account is associated with." + }, + "region": { + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions." + } + } + }, "name": { "description": "The name of the external model." 
}, @@ -1891,16 +1929,22 @@ "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" }, "microsoft_entra_client_secret": { - "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + "description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n" + }, + "microsoft_entra_client_secret_plaintext": { + "description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n" }, "microsoft_entra_tenant_id": { "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" }, "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + "description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n" }, "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + "description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`." + }, + "openai_api_key_plaintext": { + "description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`." }, "openai_api_type": { "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" @@ -1920,12 +1964,15 @@ "description": "PaLM Config. Only required if the provider is 'palm'.", "properties": { "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`." + }, + "palm_api_key_plaintext": { + "description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. 
You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`." } } }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." @@ -2331,6 +2378,9 @@ "driver_node_type_id": { "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." }, + "enable_local_disk_encryption": { + "description": "Whether to enable local disk encryption for the cluster." + }, "gcp_attributes": { "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", "properties": { @@ -2525,7 +2575,7 @@ "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." }, "gateway_storage_name": { - "description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + "description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" }, "gateway_storage_schema": { "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." @@ -2565,7 +2615,7 @@ "description": "Required. Schema name in the source database." }, "table_configuration": { - "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.", "properties": { "primary_keys": { "description": "The primary key of the table used to apply changes.", @@ -2605,7 +2655,7 @@ "description": "Required. Table name in the source database." }, "table_configuration": { - "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.", "properties": { "primary_keys": { "description": "The primary key of the table used to apply changes.", @@ -2685,6 +2735,9 @@ "description": "The absolute path of the notebook." } } + }, + "whl": { + "description": "URI of the whl to be installed." 
} } } @@ -2955,6 +3008,49 @@ } } } + }, + "schemas": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "catalog_name": { + "description": "" + }, + "comment": { + "description": "" + }, + "grants": { + "description": "", + "items": { + "description": "", + "properties": { + "principal": { + "description": "" + }, + "privileges": { + "description": "", + "items": { + "description": "" + } + } + } + } + }, + "name": { + "description": "" + }, + "properties": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "storage_root": { + "description": "" + } + } + } } } }, @@ -3194,7 +3290,7 @@ } }, "description": { - "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." + "description": "An optional description for the job. The maximum length is 27700 characters in UTF-8 encoding." }, "edit_mode": { "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." @@ -3911,7 +4007,7 @@ } }, "egg": { - "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "Deprecated. URI of the egg library to install. Installing Python egg files is deprecated and is not supported in Databricks Runtime 14.0 and above." }, "jar": { "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." @@ -4803,13 +4899,16 @@ } }, "external_model": { - "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", + "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. For an existing endpoint with external_model,\nit cannot be updated to an endpoint without external_model. If the endpoint is created without external_model, users cannot update it to add external_model later.\nThe task type of all external models within an endpoint must be the same.\n", "properties": { "ai21labs_config": { "description": "AI21Labs Config. 
Only required if the provider is 'ai21labs'.", "properties": { "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." + "description": "The Databricks secret key reference for an AI21 Labs API key. If you prefer to paste your API key directly, see `ai21labs_api_key_plaintext`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`." + }, + "ai21labs_api_key_plaintext": { + "description": "An AI21 Labs API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `ai21labs_api_key`. You must provide an API key using one of the following fields: `ai21labs_api_key` or `ai21labs_api_key_plaintext`." } } }, @@ -4817,13 +4916,19 @@ "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", "properties": { "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." + "description": "The Databricks secret key reference for an AWS access key ID with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`." + }, + "aws_access_key_id_plaintext": { + "description": "An AWS access key ID with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_access_key_id`. You must provide an API key using one of the following fields: `aws_access_key_id` or `aws_access_key_id_plaintext`." }, "aws_region": { "description": "The AWS region to use. Bedrock has to be enabled there." }, "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." + "description": "The Databricks secret key reference for an AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services. If you prefer to paste your API key directly, see `aws_secret_access_key_plaintext`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`." + }, + "aws_secret_access_key_plaintext": { + "description": "An AWS secret access key paired with the access key ID, with permissions to interact with Bedrock services provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `aws_secret_access_key`. You must provide an API key using one of the following fields: `aws_secret_access_key` or `aws_secret_access_key_plaintext`." }, "bedrock_provider": { "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." @@ -4834,15 +4939,24 @@ "description": "Anthropic Config. Only required if the provider is 'anthropic'.", "properties": { "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." + "description": "The Databricks secret key reference for an Anthropic API key. If you prefer to paste your API key directly, see `anthropic_api_key_plaintext`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`." 
+ }, + "anthropic_api_key_plaintext": { + "description": "The Anthropic API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `anthropic_api_key`. You must provide an API key using one of the following fields: `anthropic_api_key` or `anthropic_api_key_plaintext`." } } }, "cohere_config": { "description": "Cohere Config. Only required if the provider is 'cohere'.", "properties": { + "cohere_api_base": { + "description": "This is an optional field to provide a customized base URL for the Cohere API. \nIf left unspecified, the standard Cohere base URL is used.\n" + }, "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." + "description": "The Databricks secret key reference for a Cohere API key. If you prefer to paste your API key directly, see `cohere_api_key_plaintext`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`." + }, + "cohere_api_key_plaintext": { + "description": "The Cohere API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `cohere_api_key`. You must provide an API key using one of the following fields: `cohere_api_key` or `cohere_api_key_plaintext`." } } }, @@ -4850,13 +4964,33 @@ "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", "properties": { "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\nIf you prefer to paste your API key directly, see `databricks_api_token_plaintext`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n" + }, + "databricks_api_token_plaintext": { + "description": "The Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `databricks_api_token`.\nYou must provide an API key using one of the following fields: `databricks_api_token` or `databricks_api_token_plaintext`.\n" }, "databricks_workspace_url": { "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, + "google_cloud_vertex_ai_config": { + "description": "Google Cloud Vertex AI Config. Only required if the provider is 'google-cloud-vertex-ai'.", + "properties": { + "private_key": { + "description": "The Databricks secret key reference for a private key for the service account which has access to the Google Cloud Vertex AI Service. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to paste your API key directly, see `private_key_plaintext`. 
You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`" + }, + "private_key_plaintext": { + "description": "The private key for the service account which has access to the Google Cloud Vertex AI Service provided as a plaintext secret. See [Best practices for managing service account keys](https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys). If you prefer to reference your key using Databricks Secrets, see `private_key`. You must provide an API key using one of the following fields: `private_key` or `private_key_plaintext`." + }, + "project_id": { + "description": "This is the Google Cloud project id that the service account is associated with." + }, + "region": { + "description": "This is the region for the Google Cloud Vertex AI Service. See [supported regions](https://cloud.google.com/vertex-ai/docs/general/locations) for more details. Some models are only available in specific regions." + } + } + }, "name": { "description": "The name of the external model." }, @@ -4867,16 +5001,22 @@ "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" }, "microsoft_entra_client_secret": { - "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + "description": "The Databricks secret key reference for a client secret used for Microsoft Entra ID authentication.\nIf you prefer to paste your client secret directly, see `microsoft_entra_client_secret_plaintext`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n" + }, + "microsoft_entra_client_secret_plaintext": { + "description": "The client secret used for Microsoft Entra ID authentication provided as a plaintext string.\nIf you prefer to reference your key using Databricks Secrets, see `microsoft_entra_client_secret`.\nYou must provide an API key using one of the following fields: `microsoft_entra_client_secret` or `microsoft_entra_client_secret_plaintext`.\n" }, "microsoft_entra_tenant_id": { "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" }, "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + "description": "This is a field to provide a customized base URl for the OpenAI API.\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\nFor other OpenAI API types, this field is optional, and if left unspecified, the standard OpenAI base URL is used.\n" }, "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + "description": "The Databricks secret key reference for an OpenAI API key using the OpenAI or Azure service. If you prefer to paste your API key directly, see `openai_api_key_plaintext`. You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`." + }, + "openai_api_key_plaintext": { + "description": "The OpenAI API key using the OpenAI or Azure service provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `openai_api_key`. 
You must provide an API key using one of the following fields: `openai_api_key` or `openai_api_key_plaintext`." }, "openai_api_type": { "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" @@ -4896,12 +5036,15 @@ "description": "PaLM Config. Only required if the provider is 'palm'.", "properties": { "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." + "description": "The Databricks secret key reference for a PaLM API key. If you prefer to paste your API key directly, see `palm_api_key_plaintext`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`." + }, + "palm_api_key_plaintext": { + "description": "The PaLM API key provided as a plaintext string. If you prefer to reference your key using Databricks Secrets, see `palm_api_key`. You must provide an API key using one of the following fields: `palm_api_key` or `palm_api_key_plaintext`." } } }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." @@ -5307,6 +5450,9 @@ "driver_node_type_id": { "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." }, + "enable_local_disk_encryption": { + "description": "Whether to enable local disk encryption for the cluster." + }, "gcp_attributes": { "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", "properties": { @@ -5501,7 +5647,7 @@ "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." }, "gateway_storage_name": { - "description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + "description": "Optional. The Unity Catalog-compatible name for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" }, "gateway_storage_schema": { "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." @@ -5541,7 +5687,7 @@ "description": "Required. Schema name in the source database." }, "table_configuration": { - "description": "Configuration settings to control the ingestion of tables. 
These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the IngestionPipelineDefinition object.", "properties": { "primary_keys": { "description": "The primary key of the table used to apply changes.", @@ -5581,7 +5727,7 @@ "description": "Required. Table name in the source database." }, "table_configuration": { - "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the IngestionPipelineDefinition object and the SchemaSpec.", "properties": { "primary_keys": { "description": "The primary key of the table used to apply changes.", @@ -5661,6 +5807,9 @@ "description": "The absolute path of the notebook." } } + }, + "whl": { + "description": "URI of the whl to be installed." } } } @@ -5931,6 +6080,49 @@ } } } + }, + "schemas": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "catalog_name": { + "description": "" + }, + "comment": { + "description": "" + }, + "grants": { + "description": "", + "items": { + "description": "", + "properties": { + "principal": { + "description": "" + }, + "privileges": { + "description": "", + "items": { + "description": "" + } + } + } + } + }, + "name": { + "description": "" + }, + "properties": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "storage_root": { + "description": "" + } + } + } } } }, @@ -6010,6 +6202,9 @@ "description": "" } } + }, + "type": { + "description": "" } } } @@ -6115,6 +6310,9 @@ "description": "" } } + }, + "type": { + "description": "" } } } diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 82f7b9f01..6b47bb32c 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -19,16 +19,15 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "budgets", - Short: `These APIs manage budget configuration including notifications for exceeding a budget for a period.`, - Long: `These APIs manage budget configuration including notifications for exceeding a - budget for a period. They can also retrieve the status of each budget.`, + Short: `These APIs manage budget configurations for this account.`, + Long: `These APIs manage budget configurations for this account. Budgets enable you + to monitor usage across your account. You can set up budgets to either track + account-wide spending, or apply filters to track the spending of specific + teams, projects, or workspaces.`, GroupID: "billing", Annotations: map[string]string{ "package": "billing", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods @@ -52,23 +51,24 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var createOverrides []func( *cobra.Command, - *billing.WrappedBudget, + *billing.CreateBudgetConfigurationRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq billing.WrappedBudget + var createReq billing.CreateBudgetConfigurationRequest var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Use = "create" - cmd.Short = `Create a new budget.` - cmd.Long = `Create a new budget. + cmd.Short = `Create new budget.` + cmd.Long = `Create new budget. - Creates a new budget in the specified account.` + Create a new budget configuration for an account. For full details, see + https://docs.databricks.com/en/admin/account-settings/budgets.html.` cmd.Annotations = make(map[string]string) @@ -111,13 +111,13 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *billing.DeleteBudgetRequest, + *billing.DeleteBudgetConfigurationRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq billing.DeleteBudgetRequest + var deleteReq billing.DeleteBudgetConfigurationRequest // TODO: short flags @@ -125,35 +125,24 @@ func newDelete() *cobra.Command { cmd.Short = `Delete budget.` cmd.Long = `Delete budget. - Deletes the budget specified by its UUID. + Deletes a budget configuration for an account. Both account and budget + configuration are specified by ID. This cannot be undone. Arguments: - BUDGET_ID: Budget ID` + BUDGET_ID: The Databricks budget configuration ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down." - names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Budget ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have budget id") - } deleteReq.BudgetId = args[0] err = a.Budgets.Delete(ctx, deleteReq) @@ -181,50 +170,38 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *billing.GetBudgetRequest, + *billing.GetBudgetConfigurationRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq billing.GetBudgetRequest + var getReq billing.GetBudgetConfigurationRequest // TODO: short flags cmd.Use = "get BUDGET_ID" - cmd.Short = `Get budget and its status.` - cmd.Long = `Get budget and its status. + cmd.Short = `Get budget.` + cmd.Long = `Get budget. - Gets the budget specified by its UUID, including noncumulative status for each - day that the budget is configured to include. + Gets a budget configuration for an account. Both account and budget + configuration are specified by ID. 
Arguments: - BUDGET_ID: Budget ID` + BUDGET_ID: The Databricks budget configuration ID.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down." - names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Budget ID") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have budget id") - } getReq.BudgetId = args[0] response, err := a.Budgets.Get(ctx, getReq) @@ -252,25 +229,37 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *billing.ListBudgetConfigurationsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq billing.ListBudgetConfigurationsRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token received from a previous get all budget configurations call.`) + cmd.Use = "list" cmd.Short = `Get all budgets.` cmd.Long = `Get all budgets. - Gets all budgets associated with this account, including noncumulative status - for each day that the budget is configured to include.` + Gets all budgets associated with this account.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response := a.Budgets.List(ctx) + + response := a.Budgets.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -280,7 +269,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -292,13 +281,13 @@ func newList() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *billing.WrappedBudget, + *billing.UpdateBudgetConfigurationRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq billing.WrappedBudget + var updateReq billing.UpdateBudgetConfigurationRequest var updateJson flags.JsonFlag // TODO: short flags @@ -308,11 +297,11 @@ func newUpdate() *cobra.Command { cmd.Short = `Modify budget.` cmd.Long = `Modify budget. - Modifies a budget in this account. Budget properties are completely - overwritten. + Updates a budget configuration for an account. Both account and budget + configuration are specified by ID. 
Arguments: - BUDGET_ID: Budget ID` + BUDGET_ID: The Databricks budget configuration ID.` cmd.Annotations = make(map[string]string) @@ -336,11 +325,11 @@ func newUpdate() *cobra.Command { } updateReq.BudgetId = args[0] - err = a.Budgets.Update(ctx, updateReq) + response, err := a.Budgets.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -355,4 +344,4 @@ func newUpdate() *cobra.Command { return cmd } -// end service Budgets +// end service budgets diff --git a/cmd/account/cmd.go b/cmd/account/cmd.go index 627d6d590..9b4bb8139 100644 --- a/cmd/account/cmd.go +++ b/cmd/account/cmd.go @@ -26,6 +26,7 @@ import ( account_settings "github.com/databricks/cli/cmd/account/settings" storage "github.com/databricks/cli/cmd/account/storage" account_storage_credentials "github.com/databricks/cli/cmd/account/storage-credentials" + usage_dashboards "github.com/databricks/cli/cmd/account/usage-dashboards" account_users "github.com/databricks/cli/cmd/account/users" vpc_endpoints "github.com/databricks/cli/cmd/account/vpc-endpoints" workspace_assignment "github.com/databricks/cli/cmd/account/workspace-assignment" @@ -40,7 +41,6 @@ func New() *cobra.Command { cmd.AddCommand(account_access_control.New()) cmd.AddCommand(billable_usage.New()) - cmd.AddCommand(budgets.New()) cmd.AddCommand(credentials.New()) cmd.AddCommand(custom_app_integration.New()) cmd.AddCommand(encryption_keys.New()) @@ -59,10 +59,12 @@ func New() *cobra.Command { cmd.AddCommand(account_settings.New()) cmd.AddCommand(storage.New()) cmd.AddCommand(account_storage_credentials.New()) + cmd.AddCommand(usage_dashboards.New()) cmd.AddCommand(account_users.New()) cmd.AddCommand(vpc_endpoints.New()) cmd.AddCommand(workspace_assignment.New()) cmd.AddCommand(workspaces.New()) + cmd.AddCommand(budgets.New()) // Register all groups with the parent command. 
groups := Groups() diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index ca9f69a35..5cdf422d7 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -3,8 +3,6 @@ package custom_app_integration import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -19,8 +17,8 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "custom-app-integration", - Short: `These APIs enable administrators to manage custom oauth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.`, - Long: `These APIs enable administrators to manage custom oauth app integrations, + Short: `These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.`, + Long: `These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.`, GroupID: "oauth2", @@ -62,7 +60,9 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&createReq.Confidential, "confidential", createReq.Confidential, `indicates if an oauth client-secret should be generated.`) + cmd.Flags().BoolVar(&createReq.Confidential, "confidential", createReq.Confidential, `This field indicates whether an OAuth client secret is required to authenticate this client.`) + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Name of the custom OAuth app.`) + // TODO: array: redirect_urls // TODO: array: scopes // TODO: complex arg: token_access_policy @@ -72,11 +72,16 @@ func newCreate() *cobra.Command { Create Custom OAuth App Integration. - You can retrieve the custom oauth app integration via + You can retrieve the custom OAuth app integration via :method:CustomAppIntegration/get.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -87,8 +92,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := a.CustomAppIntegration.Create(ctx, createReq) @@ -131,10 +134,7 @@ func newDelete() *cobra.Command { cmd.Long = `Delete Custom OAuth App Integration. Delete an existing Custom OAuth App Integration. You can retrieve the custom - oauth app integration via :method:CustomAppIntegration/get. - - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + OAuth app integration via :method:CustomAppIntegration/get.` cmd.Annotations = make(map[string]string) @@ -189,10 +189,7 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Custom App Integration.` cmd.Long = `Get OAuth Custom App Integration. - Gets the Custom OAuth App Integration for the given integration id. 
- - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + Gets the Custom OAuth App Integration for the given integration id.` cmd.Annotations = make(map[string]string) @@ -233,25 +230,40 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *oauth2.ListCustomAppIntegrationsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq oauth2.ListCustomAppIntegrationsRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&listReq.IncludeCreatorUsername, "include-creator-username", listReq.IncludeCreatorUsername, ``) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + cmd.Use = "list" cmd.Short = `Get custom oauth app integrations.` cmd.Long = `Get custom oauth app integrations. - Get the list of custom oauth app integrations for the specified Databricks + Get the list of custom OAuth app integrations for the specified Databricks account` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response := a.CustomAppIntegration.List(ctx) + + response := a.CustomAppIntegration.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -261,7 +273,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -293,10 +305,7 @@ func newUpdate() *cobra.Command { cmd.Long = `Updates Custom OAuth App Integration. Updates an existing custom OAuth App Integration. You can retrieve the custom - oauth app integration via :method:CustomAppIntegration/get. 
- - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + OAuth app integration via :method:CustomAppIntegration/get.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index 6573b0529..f1af17d2e 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -54,7 +54,7 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().Int64Var(&listReq.PageSize, "page-size", listReq.PageSize, `The max number of OAuth published apps to return.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The max number of OAuth published apps to return in one page.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) cmd.Use = "list" diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index 32fed5cd0..5143d53cc 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -17,8 +17,8 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "published-app-integration", - Short: `These APIs enable administrators to manage published oauth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud.`, - Long: `These APIs enable administrators to manage published oauth app integrations, + Short: `These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud.`, + Long: `These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud.`, GroupID: "oauth2", @@ -60,7 +60,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.AppId, "app-id", createReq.AppId, `app_id of the oauth published app integration.`) + cmd.Flags().StringVar(&createReq.AppId, "app-id", createReq.AppId, `App id of the OAuth published app integration.`) // TODO: complex arg: token_access_policy cmd.Use = "create" @@ -69,7 +69,7 @@ func newCreate() *cobra.Command { Create Published OAuth App Integration. - You can retrieve the published oauth app integration via + You can retrieve the published OAuth app integration via :method:PublishedAppIntegration/get.` cmd.Annotations = make(map[string]string) @@ -131,10 +131,7 @@ func newDelete() *cobra.Command { cmd.Long = `Delete Published OAuth App Integration. Delete an existing Published OAuth App Integration. You can retrieve the - published oauth app integration via :method:PublishedAppIntegration/get. - - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + published OAuth app integration via :method:PublishedAppIntegration/get.` cmd.Annotations = make(map[string]string) @@ -189,10 +186,7 @@ func newGet() *cobra.Command { cmd.Short = `Get OAuth Published App Integration.` cmd.Long = `Get OAuth Published App Integration. 
- Gets the Published OAuth App Integration for the given integration id. - - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + Gets the Published OAuth App Integration for the given integration id.` cmd.Annotations = make(map[string]string) @@ -233,25 +227,39 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *oauth2.ListPublishedAppIntegrationsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq oauth2.ListPublishedAppIntegrationsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + cmd.Use = "list" cmd.Short = `Get published oauth app integrations.` cmd.Long = `Get published oauth app integrations. - Get the list of published oauth app integrations for the specified Databricks + Get the list of published OAuth app integrations for the specified Databricks account` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustAccountClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response := a.PublishedAppIntegration.List(ctx) + + response := a.PublishedAppIntegration.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -261,7 +269,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -292,10 +300,7 @@ func newUpdate() *cobra.Command { cmd.Long = `Updates Published OAuth App Integration. Updates an existing published OAuth App Integration. You can retrieve the - published oauth app integration via :method:PublishedAppIntegration/get. - - Arguments: - INTEGRATION_ID: The oauth app integration ID.` + published OAuth app integration via :method:PublishedAppIntegration/get.` cmd.Annotations = make(map[string]string) diff --git a/cmd/account/usage-dashboards/usage-dashboards.go b/cmd/account/usage-dashboards/usage-dashboards.go new file mode 100755 index 000000000..8a1c32476 --- /dev/null +++ b/cmd/account/usage-dashboards/usage-dashboards.go @@ -0,0 +1,164 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package usage_dashboards + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/billing" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "usage-dashboards", + Short: `These APIs manage usage dashboards for this account.`, + Long: `These APIs manage usage dashboards for this account. Usage dashboards enable + you to gain insights into your usage with pre-built dashboards: visualize + breakdowns, analyze tag attributions, and identify cost drivers.`, + GroupID: "billing", + Annotations: map[string]string{ + "package": "billing", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + + // Apply optional overrides to this command. 
+ for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *billing.CreateBillingUsageDashboardRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq billing.CreateBillingUsageDashboardRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Var(&createReq.DashboardType, "dashboard-type", `Workspace level usage dashboard shows usage data for the specified workspace ID. Supported values: [USAGE_DASHBOARD_TYPE_GLOBAL, USAGE_DASHBOARD_TYPE_WORKSPACE]`) + cmd.Flags().Int64Var(&createReq.WorkspaceId, "workspace-id", createReq.WorkspaceId, `The workspace ID of the workspace in which the usage dashboard is created.`) + + cmd.Use = "create" + cmd.Short = `Create new usage dashboard.` + cmd.Long = `Create new usage dashboard. + + Create a usage dashboard specified by workspaceId, accountId, and dashboard + type.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + + response, err := a.UsageDashboards.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *billing.GetBillingUsageDashboardRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq billing.GetBillingUsageDashboardRequest + + // TODO: short flags + + cmd.Flags().Var(&getReq.DashboardType, "dashboard-type", `Workspace level usage dashboard shows usage data for the specified workspace ID. Supported values: [USAGE_DASHBOARD_TYPE_GLOBAL, USAGE_DASHBOARD_TYPE_WORKSPACE]`) + cmd.Flags().Int64Var(&getReq.WorkspaceId, "workspace-id", getReq.WorkspaceId, `The workspace ID of the workspace in which the usage dashboard is created.`) + + cmd.Use = "get" + cmd.Short = `Get usage dashboard.` + cmd.Long = `Get usage dashboard. 
+ + Get a usage dashboard specified by workspaceId, accountId, and dashboard type.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.UsageDashboards.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// end service UsageDashboards diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index b965d31ad..58468d09f 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -66,7 +66,7 @@ func newDelete() *cobra.Command { for the specified principal. Arguments: - WORKSPACE_ID: The workspace ID. + WORKSPACE_ID: The workspace ID for the account. PRINCIPAL_ID: The ID of the user, service principal, or group.` cmd.Annotations = make(map[string]string) @@ -247,6 +247,8 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: array: permissions + cmd.Use = "update WORKSPACE_ID PRINCIPAL_ID" cmd.Short = `Create or update permissions assignment.` cmd.Long = `Create or update permissions assignment. @@ -255,7 +257,7 @@ func newUpdate() *cobra.Command { workspace for the specified principal. Arguments: - WORKSPACE_ID: The workspace ID. + WORKSPACE_ID: The workspace ID for the account. PRINCIPAL_ID: The ID of the user, service principal, or group.` cmd.Annotations = make(map[string]string) @@ -275,8 +277,6 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } _, err = fmt.Sscan(args[0], &updateReq.WorkspaceId) if err != nil { diff --git a/cmd/cmd.go b/cmd/cmd.go index 5d835409f..5b53a4ae5 100644 --- a/cmd/cmd.go +++ b/cmd/cmd.go @@ -15,6 +15,7 @@ import ( "github.com/databricks/cli/cmd/sync" "github.com/databricks/cli/cmd/version" "github.com/databricks/cli/cmd/workspace" + "github.com/databricks/cli/cmd/workspace/apps" "github.com/spf13/cobra" ) @@ -67,6 +68,7 @@ func New(ctx context.Context) *cobra.Command { // Add other subcommands. 
cli.AddCommand(api.New()) + cli.AddCommand(apps.New()) cli.AddCommand(auth.New()) cli.AddCommand(bundle.New()) cli.AddCommand(configure.New()) diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 8754a560b..1e45fafe6 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -182,7 +182,7 @@ func TestInstallerWorksForReleases(t *testing.T) { w.Write(raw) return } - if r.URL.Path == "/api/2.0/clusters/get" { + if r.URL.Path == "/api/2.1/clusters/get" { respondWithJSON(t, w, &compute.ClusterDetails{ State: compute.StateRunning, }) @@ -249,8 +249,9 @@ func TestInstallerWorksForDevelopment(t *testing.T) { Path: filepath.Dir(t.TempDir()), }) }() + server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - if r.URL.Path == "/api/2.0/clusters/list" { + if r.URL.Path == "/api/2.1/clusters/list" { respondWithJSON(t, w, compute.ListClustersResponse{ Clusters: []compute.ClusterDetails{ { @@ -278,7 +279,7 @@ func TestInstallerWorksForDevelopment(t *testing.T) { }) return } - if r.URL.Path == "/api/2.0/clusters/spark-versions" { + if r.URL.Path == "/api/2.1/clusters/spark-versions" { respondWithJSON(t, w, compute.GetSparkVersionsResponse{ Versions: []compute.SparkVersion{ { @@ -289,7 +290,7 @@ func TestInstallerWorksForDevelopment(t *testing.T) { }) return } - if r.URL.Path == "/api/2.0/clusters/get" { + if r.URL.Path == "/api/2.1/clusters/get" { respondWithJSON(t, w, &compute.ClusterDetails{ State: compute.StateRunning, }) @@ -387,7 +388,7 @@ func TestUpgraderWorksForReleases(t *testing.T) { w.Write(raw) return } - if r.URL.Path == "/api/2.0/clusters/get" { + if r.URL.Path == "/api/2.1/clusters/get" { respondWithJSON(t, w, &compute.ClusterDetails{ State: compute.StateRunning, }) diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 486f587ef..9ba2a8fa9 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -111,6 +111,10 @@ func TestAccountClientOrPrompt(t *testing.T) { expectPrompts(t, accountPromptFn, &config.Config{ Host: "https://accounts.azuredatabricks.net/", AccountID: "1234", + + // Force SDK to not try and lookup the tenant ID from the host. + // The host above is invalid and will not be reachable. + AzureTenantID: "nonempty", }) }) @@ -165,6 +169,10 @@ func TestWorkspaceClientOrPrompt(t *testing.T) { t.Run("Prompt if no credential provider can be configured", func(t *testing.T) { expectPrompts(t, workspacePromptFn, &config.Config{ Host: "https://adb-1111.11.azuredatabricks.net/", + + // Force SDK to not try and lookup the tenant ID from the host. + // The host above is invalid and will not be reachable. + AzureTenantID: "nonempty", }) }) diff --git a/cmd/workspace/alerts-legacy/alerts-legacy.go b/cmd/workspace/alerts-legacy/alerts-legacy.go new file mode 100755 index 000000000..1046b1124 --- /dev/null +++ b/cmd/workspace/alerts-legacy/alerts-legacy.go @@ -0,0 +1,388 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package alerts_legacy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "alerts-legacy", + Short: `The alerts API can be used to perform CRUD operations on alerts.`, + Long: `The alerts API can be used to perform CRUD operations on alerts. An alert is a + Databricks SQL object that periodically runs a query, evaluates a condition of + its result, and notifies one or more users and/or notification destinations if + the condition was met. Alerts can be scheduled using the sql_task type of + the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API is now available. Please see + the latest version. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html`, + GroupID: "sql", + Annotations: map[string]string{ + "package": "sql", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *sql.CreateAlert, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq sql.CreateAlert + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the object.`) + cmd.Flags().IntVar(&createReq.Rearm, "rearm", createReq.Rearm, `Number of seconds after being triggered before the alert rearms itself and can be triggered again.`) + + cmd.Use = "create" + cmd.Short = `Create an alert.` + cmd.Long = `Create an alert. + + Creates an alert. An alert is a Databricks SQL object that periodically runs a + query, evaluates a condition of its result, and notifies users or notification + destinations if the condition was met. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/create instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.AlertsLegacy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteOverrides []func( + *cobra.Command, + *sql.DeleteAlertsLegacyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq sql.DeleteAlertsLegacyRequest + + // TODO: short flags + + cmd.Use = "delete ALERT_ID" + cmd.Short = `Delete an alert.` + cmd.Long = `Delete an alert. + + Deletes an alert. Deleted alerts are no longer accessible and cannot be + restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + the trash. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/delete instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts Legacy drop-down." + names, err := w.AlertsLegacy.LegacyAlertNameToIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Alerts Legacy drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.AlertId = args[0] + + err = w.AlertsLegacy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *sql.GetAlertsLegacyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq sql.GetAlertsLegacyRequest + + // TODO: short flags + + cmd.Use = "get ALERT_ID" + cmd.Short = `Get an alert.` + cmd.Long = `Get an alert. + + Gets an alert. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/get instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts Legacy drop-down." + names, err := w.AlertsLegacy.LegacyAlertNameToIdMap(ctx) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Alerts Legacy drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.AlertId = args[0] + + response, err := w.AlertsLegacy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "list" + cmd.Short = `Get alerts.` + cmd.Long = `Get alerts. + + Gets a list of alerts. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/list instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.AlertsLegacy.List(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *sql.EditAlert, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.EditAlert + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&updateReq.Rearm, "rearm", updateReq.Rearm, `Number of seconds after being triggered before the alert rearms itself and can be triggered again.`) + + cmd.Use = "update ALERT_ID" + cmd.Short = `Update an alert.` + cmd.Long = `Update an alert. + + Updates an alert. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:alerts/update instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.AlertId = args[0] + + err = w.AlertsLegacy.Update(ctx, updateReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AlertsLegacy diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index 61c1e0eab..cfaa3f55f 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -24,12 +24,7 @@ func New() *cobra.Command { Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the sql_task type of - the Jobs API, e.g. :method:jobs/create. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, + the Jobs API, e.g. :method:jobs/create.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -57,36 +52,33 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *sql.CreateAlert, + *sql.CreateAlertRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq sql.CreateAlert + var createReq sql.CreateAlertRequest var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.Parent, "parent", createReq.Parent, `The identifier of the workspace folder containing the object.`) - cmd.Flags().IntVar(&createReq.Rearm, "rearm", createReq.Rearm, `Number of seconds after being triggered before the alert rearms itself and can be triggered again.`) + // TODO: complex arg: alert cmd.Use = "create" cmd.Short = `Create an alert.` cmd.Long = `Create an alert. - Creates an alert. An alert is a Databricks SQL object that periodically runs a - query, evaluates a condition of its result, and notifies users or notification - destinations if the condition was met. - - **Note**: A new version of the Databricks SQL API will soon be available. 
- [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Creates an alert.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -97,8 +89,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := w.Alerts.Create(ctx, createReq) @@ -126,28 +116,23 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *sql.DeleteAlertRequest, + *sql.TrashAlertRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq sql.DeleteAlertRequest + var deleteReq sql.TrashAlertRequest // TODO: short flags - cmd.Use = "delete ALERT_ID" + cmd.Use = "delete ID" cmd.Short = `Delete an alert.` cmd.Long = `Delete an alert. - Deletes an alert. Deleted alerts are no longer accessible and cannot be - restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to - the trash. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Moves an alert to the trash. Trashed alerts immediately disappear from + searches and list views, and can no longer trigger. You can restore a trashed + alert through the UI. A trashed alert is permanently deleted after 30 days.` cmd.Annotations = make(map[string]string) @@ -158,8 +143,8 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts drop-down." - names, err := w.Alerts.AlertNameToIdMap(ctx) + promptSpinner <- "No ID argument specified. Loading names for Alerts drop-down." + names, err := w.Alerts.ListAlertsResponseAlertDisplayNameToIdMap(ctx, sql.ListAlertsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Alerts drop-down. Please manually specify required arguments. Original error: %w", err) @@ -173,7 +158,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have ") } - deleteReq.AlertId = args[0] + deleteReq.Id = args[0] err = w.Alerts.Delete(ctx, deleteReq) if err != nil { @@ -210,16 +195,11 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get ALERT_ID" + cmd.Use = "get ID" cmd.Short = `Get an alert.` cmd.Long = `Get an alert. - Gets an alert. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Gets an alert.` cmd.Annotations = make(map[string]string) @@ -230,8 +210,8 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No ALERT_ID argument specified. Loading names for Alerts drop-down." - names, err := w.Alerts.AlertNameToIdMap(ctx) + promptSpinner <- "No ID argument specified. Loading names for Alerts drop-down." 
+ names, err := w.Alerts.ListAlertsResponseAlertDisplayNameToIdMap(ctx, sql.ListAlertsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Alerts drop-down. Please manually specify required arguments. Original error: %w", err) @@ -245,7 +225,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have ") } - getReq.AlertId = args[0] + getReq.Id = args[0] response, err := w.Alerts.Get(ctx, getReq) if err != nil { @@ -272,33 +252,41 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *sql.ListAlertsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq sql.ListAlertsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + cmd.Use = "list" - cmd.Short = `Get alerts.` - cmd.Long = `Get alerts. + cmd.Short = `List alerts.` + cmd.Long = `List alerts. - Gets a list of alerts. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Gets a list of alerts accessible to the user, ordered by creation time. + **Warning:** Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Alerts.List(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + + response := w.Alerts.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -307,7 +295,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -319,35 +307,44 @@ func newList() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *sql.EditAlert, + *sql.UpdateAlertRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq sql.EditAlert + var updateReq sql.UpdateAlertRequest var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().IntVar(&updateReq.Rearm, "rearm", updateReq.Rearm, `Number of seconds after being triggered before the alert rearms itself and can be triggered again.`) + // TODO: complex arg: alert - cmd.Use = "update ALERT_ID" + cmd.Use = "update ID UPDATE_MASK" cmd.Short = `Update an alert.` cmd.Long = `Update an alert. Updates an alert. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + + Arguments: + ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. 
Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only ID as positional arguments. Provide 'update_mask' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) return check(cmd, args) } @@ -361,16 +358,17 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - updateReq.AlertId = args[0] + updateReq.Id = args[0] + if !cmd.Flags().Changed("json") { + updateReq.UpdateMask = args[1] + } - err = w.Alerts.Update(ctx, updateReq) + response, err := w.Alerts.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 1572d4f4b..bc3fbe920 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -9,7 +9,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/databricks/databricks-sdk-go/service/apps" "github.com/spf13/cobra" ) @@ -24,9 +24,9 @@ func New() *cobra.Command { Long: `Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, - GroupID: "serving", + GroupID: "apps", Annotations: map[string]string{ - "package": "serving", + "package": "apps", }, // This service is being previewed; hide from help output. @@ -39,12 +39,15 @@ func New() *cobra.Command { cmd.AddCommand(newDeploy()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetDeployment()) - cmd.AddCommand(newGetEnvironment()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) cmd.AddCommand(newList()) cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newSetPermissions()) cmd.AddCommand(newStart()) cmd.AddCommand(newStop()) cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -60,13 +63,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *serving.CreateAppRequest, + *apps.CreateAppRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq serving.CreateAppRequest + var createReq apps.CreateAppRequest var createJson flags.JsonFlag var createSkipWait bool @@ -126,7 +129,7 @@ func newCreate() *cobra.Command { return cmdio.Render(ctx, wait.Response) } spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *serving.App) { + info, err := wait.OnProgress(func(i *apps.App) { if i.Status == nil { return } @@ -162,13 +165,13 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var deleteOverrides []func( *cobra.Command, - *serving.DeleteAppRequest, + *apps.DeleteAppRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq serving.DeleteAppRequest + var deleteReq apps.DeleteAppRequest // TODO: short flags @@ -220,13 +223,13 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deployOverrides []func( *cobra.Command, - *serving.CreateAppDeploymentRequest, + *apps.CreateAppDeploymentRequest, ) func newDeploy() *cobra.Command { cmd := &cobra.Command{} - var deployReq serving.CreateAppDeploymentRequest + var deployReq apps.CreateAppDeploymentRequest var deployJson flags.JsonFlag var deploySkipWait bool @@ -237,7 +240,9 @@ func newDeploy() *cobra.Command { // TODO: short flags cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH MODE" + cmd.Flags().Var(&deployReq.Mode, "mode", `The mode of which the deployment will manage the source code. Supported values: [AUTO_SYNC, SNAPSHOT]`) + + cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH" cmd.Short = `Create an app deployment.` cmd.Long = `Create an app deployment. @@ -251,8 +256,7 @@ func newDeploy() *cobra.Command { deployed app. The former refers to the original source code location of the app in the workspace during deployment creation, whereas the latter provides a system generated stable snapshotted source code path used by - the deployment. - MODE: The mode of which the deployment will manage the source code.` + the deployment.` cmd.Annotations = make(map[string]string) @@ -260,11 +264,11 @@ func newDeploy() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path', 'mode' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path' in your JSON input") } return nil } - check := root.ExactArgs(3) + check := root.ExactArgs(2) return check(cmd, args) } @@ -283,12 +287,6 @@ func newDeploy() *cobra.Command { if !cmd.Flags().Changed("json") { deployReq.SourceCodePath = args[1] } - if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &deployReq.Mode) - if err != nil { - return fmt.Errorf("invalid MODE: %s", args[2]) - } - } wait, err := w.Apps.Deploy(ctx, deployReq) if err != nil { @@ -298,7 +296,7 @@ func newDeploy() *cobra.Command { return cmdio.Render(ctx, wait.Response) } spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *serving.AppDeployment) { + info, err := wait.OnProgress(func(i *apps.AppDeployment) { if i.Status == nil { return } @@ -334,13 +332,13 @@ func newDeploy() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var getOverrides []func( *cobra.Command, - *serving.GetAppRequest, + *apps.GetAppRequest, ) func newGet() *cobra.Command { cmd := &cobra.Command{} - var getReq serving.GetAppRequest + var getReq apps.GetAppRequest // TODO: short flags @@ -392,13 +390,13 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var getDeploymentOverrides []func( *cobra.Command, - *serving.GetAppDeploymentRequest, + *apps.GetAppDeploymentRequest, ) func newGetDeployment() *cobra.Command { cmd := &cobra.Command{} - var getDeploymentReq serving.GetAppDeploymentRequest + var getDeploymentReq apps.GetAppDeploymentRequest // TODO: short flags @@ -447,30 +445,30 @@ func newGetDeployment() *cobra.Command { return cmd } -// start get-environment command +// start get-permission-levels command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getEnvironmentOverrides []func( +var getPermissionLevelsOverrides []func( *cobra.Command, - *serving.GetAppEnvironmentRequest, + *apps.GetAppPermissionLevelsRequest, ) -func newGetEnvironment() *cobra.Command { +func newGetPermissionLevels() *cobra.Command { cmd := &cobra.Command{} - var getEnvironmentReq serving.GetAppEnvironmentRequest + var getPermissionLevelsReq apps.GetAppPermissionLevelsRequest // TODO: short flags - cmd.Use = "get-environment NAME" - cmd.Short = `Get app environment.` - cmd.Long = `Get app environment. + cmd.Use = "get-permission-levels APP_NAME" + cmd.Short = `Get app permission levels.` + cmd.Long = `Get app permission levels. - Retrieves app environment. + Gets the permission levels that a user can have on an object. Arguments: - NAME: The name of the app.` + APP_NAME: The app for which to get or manage permissions.` cmd.Annotations = make(map[string]string) @@ -484,9 +482,9 @@ func newGetEnvironment() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getEnvironmentReq.Name = args[0] + getPermissionLevelsReq.AppName = args[0] - response, err := w.Apps.GetEnvironment(ctx, getEnvironmentReq) + response, err := w.Apps.GetPermissionLevels(ctx, getPermissionLevelsReq) if err != nil { return err } @@ -498,8 +496,67 @@ func newGetEnvironment() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getEnvironmentOverrides { - fn(cmd, &getEnvironmentReq) + for _, fn := range getPermissionLevelsOverrides { + fn(cmd, &getPermissionLevelsReq) + } + + return cmd +} + +// start get-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPermissionsOverrides []func( + *cobra.Command, + *apps.GetAppPermissionsRequest, +) + +func newGetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var getPermissionsReq apps.GetAppPermissionsRequest + + // TODO: short flags + + cmd.Use = "get-permissions APP_NAME" + cmd.Short = `Get app permissions.` + cmd.Long = `Get app permissions. + + Gets the permissions of an app. Apps can inherit permissions from their root + object. + + Arguments: + APP_NAME: The app for which to get or manage permissions.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPermissionsReq.AppName = args[0] + + response, err := w.Apps.GetPermissions(ctx, getPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPermissionsOverrides { + fn(cmd, &getPermissionsReq) } return cmd @@ -511,13 +568,13 @@ func newGetEnvironment() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, - *serving.ListAppsRequest, + *apps.ListAppsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} - var listReq serving.ListAppsRequest + var listReq apps.ListAppsRequest // TODO: short flags @@ -564,13 +621,13 @@ func newList() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listDeploymentsOverrides []func( *cobra.Command, - *serving.ListAppDeploymentsRequest, + *apps.ListAppDeploymentsRequest, ) func newListDeployments() *cobra.Command { cmd := &cobra.Command{} - var listDeploymentsReq serving.ListAppDeploymentsRequest + var listDeploymentsReq apps.ListAppDeploymentsRequest // TODO: short flags @@ -616,20 +673,94 @@ func newListDeployments() *cobra.Command { return cmd } +// start set-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var setPermissionsOverrides []func( + *cobra.Command, + *apps.AppPermissionsRequest, +) + +func newSetPermissions() *cobra.Command { + cmd := &cobra.Command{} + + var setPermissionsReq apps.AppPermissionsRequest + var setPermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&setPermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "set-permissions APP_NAME" + cmd.Short = `Set app permissions.` + cmd.Long = `Set app permissions. + + Sets permissions on an app. Apps can inherit permissions from their root + object. + + Arguments: + APP_NAME: The app for which to get or manage permissions.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = setPermissionsJson.Unmarshal(&setPermissionsReq) + if err != nil { + return err + } + } + setPermissionsReq.AppName = args[0] + + response, err := w.Apps.SetPermissions(ctx, setPermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range setPermissionsOverrides { + fn(cmd, &setPermissionsReq) + } + + return cmd +} + // start start command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. 
var startOverrides []func( *cobra.Command, - *serving.StartAppRequest, + *apps.StartAppRequest, ) func newStart() *cobra.Command { cmd := &cobra.Command{} - var startReq serving.StartAppRequest + var startReq apps.StartAppRequest + var startSkipWait bool + var startTimeout time.Duration + + cmd.Flags().BoolVar(&startSkipWait, "no-wait", startSkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&startTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) // TODO: short flags cmd.Use = "start NAME" @@ -655,11 +786,30 @@ func newStart() *cobra.Command { startReq.Name = args[0] - response, err := w.Apps.Start(ctx, startReq) + wait, err := w.Apps.Start(ctx, startReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if startSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *apps.AppDeployment) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(startTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. @@ -680,13 +830,13 @@ func newStart() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var stopOverrides []func( *cobra.Command, - *serving.StopAppRequest, + *apps.StopAppRequest, ) func newStop() *cobra.Command { cmd := &cobra.Command{} - var stopReq serving.StopAppRequest + var stopReq apps.StopAppRequest // TODO: short flags @@ -738,13 +888,13 @@ func newStop() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *serving.UpdateAppRequest, + *apps.UpdateAppRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq serving.UpdateAppRequest + var updateReq apps.UpdateAppRequest var updateJson flags.JsonFlag // TODO: short flags @@ -801,4 +951,73 @@ func newUpdate() *cobra.Command { return cmd } +// start update-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updatePermissionsOverrides []func( + *cobra.Command, + *apps.AppPermissionsRequest, +) + +func newUpdatePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var updatePermissionsReq apps.AppPermissionsRequest + var updatePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: array: access_control_list + + cmd.Use = "update-permissions APP_NAME" + cmd.Short = `Update app permissions.` + cmd.Long = `Update app permissions. + + Updates the permissions on an app. Apps can inherit permissions from their + root object. 
+ + Arguments: + APP_NAME: The app for which to get or manage permissions.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updatePermissionsJson.Unmarshal(&updatePermissionsReq) + if err != nil { + return err + } + } + updatePermissionsReq.AppName = args[0] + + response, err := w.Apps.UpdatePermissions(ctx, updatePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updatePermissionsOverrides { + fn(cmd, &updatePermissionsReq) + } + + return cmd +} + // end service Apps diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 8129db477..830d44ca3 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -90,30 +90,20 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `Additional human-readable description of the cluster policy.`) // TODO: array: libraries cmd.Flags().Int64Var(&createReq.MaxClustersPerUser, "max-clusters-per-user", createReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Cluster Policy name requested by the user.`) cmd.Flags().StringVar(&createReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", createReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&createReq.PolicyFamilyId, "policy-family-id", createReq.PolicyFamilyId, `ID of the policy family.`) - cmd.Use = "create NAME" + cmd.Use = "create" cmd.Short = `Create a new policy.` cmd.Long = `Create a new policy. - Creates a new policy with prescribed settings. - - Arguments: - NAME: Cluster Policy name requested by the user. This has to be unique. Length - must be between 1 and 100 characters.` + Creates a new policy with prescribed settings.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(0)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name' in your JSON input") - } - return nil - } - check := root.ExactArgs(1) + check := root.ExactArgs(0) return check(cmd, args) } @@ -128,9 +118,6 @@ func newCreate() *cobra.Command { return err } } - if !cmd.Flags().Changed("json") { - createReq.Name = args[0] - } response, err := w.ClusterPolicies.Create(ctx, createReq) if err != nil { @@ -264,10 +251,11 @@ func newEdit() *cobra.Command { cmd.Flags().StringVar(&editReq.Description, "description", editReq.Description, `Additional human-readable description of the cluster policy.`) // TODO: array: libraries cmd.Flags().Int64Var(&editReq.MaxClustersPerUser, "max-clusters-per-user", editReq.MaxClustersPerUser, `Max number of clusters per user that can be active using this policy.`) + cmd.Flags().StringVar(&editReq.Name, "name", editReq.Name, `Cluster Policy name requested by the user.`) cmd.Flags().StringVar(&editReq.PolicyFamilyDefinitionOverrides, "policy-family-definition-overrides", editReq.PolicyFamilyDefinitionOverrides, `Policy definition JSON document expressed in [Databricks Policy Definition Language](https://docs.databricks.com/administration-guide/clusters/policy-definition.html).`) cmd.Flags().StringVar(&editReq.PolicyFamilyId, "policy-family-id", editReq.PolicyFamilyId, `ID of the policy family.`) - cmd.Use = "edit POLICY_ID NAME" + cmd.Use = "edit POLICY_ID" cmd.Short = `Update a cluster policy.` cmd.Long = `Update a cluster policy. @@ -275,9 +263,7 @@ func newEdit() *cobra.Command { governed by the previous policy invalid. Arguments: - POLICY_ID: The ID of the policy to update. - NAME: Cluster Policy name requested by the user. This has to be unique. Length - must be between 1 and 100 characters.` + POLICY_ID: The ID of the policy to update.` cmd.Annotations = make(map[string]string) @@ -285,12 +271,11 @@ func newEdit() *cobra.Command { if cmd.Flags().Changed("json") { err := root.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id', 'name' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id' in your JSON input") } return nil } - check := root.ExactArgs(2) - return check(cmd, args) + return nil } cmd.PreRunE = root.MustWorkspaceClient @@ -303,13 +288,26 @@ func newEdit() *cobra.Command { if err != nil { return err } - } - if !cmd.Flags().Changed("json") { + } else { + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No POLICY_ID argument specified. Loading names for Cluster Policies drop-down." + names, err := w.ClusterPolicies.PolicyNameToPolicyIdMap(ctx, compute.ListClusterPoliciesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "The ID of the policy to update") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have the id of the policy to update") + } editReq.PolicyId = args[0] } - if !cmd.Flags().Changed("json") { - editReq.Name = args[1] - } err = w.ClusterPolicies.Edit(ctx, editReq) if err != nil { @@ -353,7 +351,7 @@ func newGet() *cobra.Command { Get a cluster policy entity. Creation and editing is available to admins only. 
Arguments: - POLICY_ID: Canonical unique identifier for the cluster policy.` + POLICY_ID: Canonical unique identifier for the Cluster Policy.` cmd.Annotations = make(map[string]string) @@ -370,7 +368,7 @@ func newGet() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Cluster Policies drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "Canonical unique identifier for the cluster policy") + id, err := cmdio.Select(ctx, names, "Canonical unique identifier for the Cluster Policy") if err != nil { return err } diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index abde1bb71..a64a6ab7c 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -43,11 +43,10 @@ func New() *cobra.Command { manually terminate and restart an all-purpose cluster. Multiple users can share such clusters to do collaborative interactive analysis. - IMPORTANT: Databricks retains cluster configuration information for up to 200 - all-purpose clusters terminated in the last 30 days and up to 30 job clusters - recently terminated by the job scheduler. To keep an all-purpose cluster - configuration even after it has been terminated for more than 30 days, an - administrator can pin a cluster to the cluster list.`, + IMPORTANT: Databricks retains cluster configuration information for terminated + clusters for 30 days. To keep an all-purpose cluster configuration even after + it has been terminated for more than 30 days, an administrator can pin a + cluster to the cluster list.`, GroupID: "compute", Annotations: map[string]string{ "package": "compute", @@ -74,6 +73,7 @@ func New() *cobra.Command { cmd.AddCommand(newSparkVersions()) cmd.AddCommand(newStart()) cmd.AddCommand(newUnpin()) + cmd.AddCommand(newUpdate()) cmd.AddCommand(newUpdatePermissions()) // Apply optional overrides to this command. @@ -885,21 +885,18 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&listReq.CanUseClient, "can-use-client", listReq.CanUseClient, `Filter clusters based on what type of client it can be used for.`) + // TODO: complex arg: filter_by + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of clusters respectively.`) + // TODO: complex arg: sort_by cmd.Use = "list" - cmd.Short = `List all clusters.` - cmd.Long = `List all clusters. + cmd.Short = `List clusters.` + cmd.Long = `List clusters. - Return information about all pinned clusters, active clusters, up to 200 of - the most recently terminated all-purpose clusters in the past 30 days, and up - to 30 of the most recently terminated job clusters in the past 30 days. - - For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated - all-purpose clusters in the past 30 days, and 50 terminated job clusters in - the past 30 days, then this API returns the 1 pinned cluster, 4 active - clusters, all 45 terminated all-purpose clusters, and the 30 most recently - terminated job clusters.` + Return information about all pinned and active clusters, and all clusters + terminated within the last 30 days. 
Clusters terminated prior to this period + are not included.` cmd.Annotations = make(map[string]string) @@ -1753,6 +1750,117 @@ func newUnpin() *cobra.Command { return cmd } +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *compute.UpdateCluster, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq compute.UpdateCluster + var updateJson flags.JsonFlag + + var updateSkipWait bool + var updateTimeout time.Duration + + cmd.Flags().BoolVar(&updateSkipWait, "no-wait", updateSkipWait, `do not wait to reach RUNNING state`) + cmd.Flags().DurationVar(&updateTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach RUNNING state`) + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: cluster + + cmd.Use = "update CLUSTER_ID UPDATE_MASK" + cmd.Short = `Update cluster configuration (partial).` + cmd.Long = `Update cluster configuration (partial). + + Updates the configuration of a cluster to match the partial set of attributes + and size. Denote which fields to update using the update_mask field in the + request body. A cluster can be updated if it is in a RUNNING or TERMINATED + state. If a cluster is updated while in a RUNNING state, it will be + restarted so that the new attributes can take effect. If a cluster is updated + while in a TERMINATED state, it will remain TERMINATED. The updated + attributes will take effect the next time the cluster is started using the + clusters/start API. Attempts to update a cluster in any other state will be + rejected with an INVALID_STATE error code. Clusters created by the + Databricks Jobs service cannot be updated. + + Arguments: + CLUSTER_ID: ID of the cluster. + UPDATE_MASK: Specifies which fields of the cluster will be updated. This is required in + the POST request. The update mask should be supplied as a single string. + To specify multiple fields, separate them with commas (no spaces). To + delete a field from a cluster configuration, add it to the update_mask + string but omit it from the cluster object.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'cluster_id', 'update_mask' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + updateReq.ClusterId = args[0] + } + if !cmd.Flags().Changed("json") { + updateReq.UpdateMask = args[1] + } + + wait, err := w.Clusters.Update(ctx, updateReq) + if err != nil { + return err + } + if updateSkipWait { + return nil + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *compute.ClusterDetails) { + statusMessage := i.StateMessage + spinner <- statusMessage + }).GetWithTimeout(updateTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + // start update-permissions command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 7ad9389a8..75664c79c 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -4,6 +4,7 @@ package workspace import ( alerts "github.com/databricks/cli/cmd/workspace/alerts" + alerts_legacy "github.com/databricks/cli/cmd/workspace/alerts-legacy" apps "github.com/databricks/cli/cmd/workspace/apps" artifact_allowlists "github.com/databricks/cli/cmd/workspace/artifact-allowlists" catalogs "github.com/databricks/cli/cmd/workspace/catalogs" @@ -24,6 +25,7 @@ import ( experiments "github.com/databricks/cli/cmd/workspace/experiments" external_locations "github.com/databricks/cli/cmd/workspace/external-locations" functions "github.com/databricks/cli/cmd/workspace/functions" + genie "github.com/databricks/cli/cmd/workspace/genie" git_credentials "github.com/databricks/cli/cmd/workspace/git-credentials" global_init_scripts "github.com/databricks/cli/cmd/workspace/global-init-scripts" grants "github.com/databricks/cli/cmd/workspace/grants" @@ -37,6 +39,7 @@ import ( metastores "github.com/databricks/cli/cmd/workspace/metastores" model_registry "github.com/databricks/cli/cmd/workspace/model-registry" model_versions "github.com/databricks/cli/cmd/workspace/model-versions" + notification_destinations "github.com/databricks/cli/cmd/workspace/notification-destinations" online_tables "github.com/databricks/cli/cmd/workspace/online-tables" permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration" permissions "github.com/databricks/cli/cmd/workspace/permissions" @@ -52,8 +55,10 @@ import ( providers "github.com/databricks/cli/cmd/workspace/providers" quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors" queries "github.com/databricks/cli/cmd/workspace/queries" + queries_legacy "github.com/databricks/cli/cmd/workspace/queries-legacy" query_history "github.com/databricks/cli/cmd/workspace/query-history" query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" + query_visualizations_legacy "github.com/databricks/cli/cmd/workspace/query-visualizations-legacy" recipient_activation 
"github.com/databricks/cli/cmd/workspace/recipient-activation" recipients "github.com/databricks/cli/cmd/workspace/recipients" registered_models "github.com/databricks/cli/cmd/workspace/registered-models" @@ -85,6 +90,7 @@ func All() []*cobra.Command { var out []*cobra.Command out = append(out, alerts.New()) + out = append(out, alerts_legacy.New()) out = append(out, apps.New()) out = append(out, artifact_allowlists.New()) out = append(out, catalogs.New()) @@ -105,6 +111,7 @@ func All() []*cobra.Command { out = append(out, experiments.New()) out = append(out, external_locations.New()) out = append(out, functions.New()) + out = append(out, genie.New()) out = append(out, git_credentials.New()) out = append(out, global_init_scripts.New()) out = append(out, grants.New()) @@ -118,6 +125,7 @@ func All() []*cobra.Command { out = append(out, metastores.New()) out = append(out, model_registry.New()) out = append(out, model_versions.New()) + out = append(out, notification_destinations.New()) out = append(out, online_tables.New()) out = append(out, permission_migration.New()) out = append(out, permissions.New()) @@ -133,8 +141,10 @@ func All() []*cobra.Command { out = append(out, providers.New()) out = append(out, quality_monitors.New()) out = append(out, queries.New()) + out = append(out, queries_legacy.New()) out = append(out, query_history.New()) out = append(out, query_visualizations.New()) + out = append(out, query_visualizations_legacy.New()) out = append(out, recipient_activation.New()) out = append(out, recipients.New()) out = append(out, registered_models.New()) diff --git a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go index 6f3ba4b42..46fd27c6f 100755 --- a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go +++ b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go @@ -22,9 +22,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/consumer-installations/consumer-installations.go b/cmd/workspace/consumer-installations/consumer-installations.go index d176e5b39..92f61789f 100755 --- a/cmd/workspace/consumer-installations/consumer-installations.go +++ b/cmd/workspace/consumer-installations/consumer-installations.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go index 18f3fb39e..5a8f76e36 100755 --- a/cmd/workspace/consumer-listings/consumer-listings.go +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -25,9 +25,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. 
- Hidden: true, } // Add methods @@ -186,14 +183,12 @@ func newList() *cobra.Command { // TODO: array: assets // TODO: array: categories - cmd.Flags().BoolVar(&listReq.IsAscending, "is-ascending", listReq.IsAscending, ``) cmd.Flags().BoolVar(&listReq.IsFree, "is-free", listReq.IsFree, `Filters each listing based on if it is free.`) cmd.Flags().BoolVar(&listReq.IsPrivateExchange, "is-private-exchange", listReq.IsPrivateExchange, `Filters each listing based on if it is a private exchange.`) cmd.Flags().BoolVar(&listReq.IsStaffPick, "is-staff-pick", listReq.IsStaffPick, `Filters each listing based on whether it is a staff pick.`) cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) // TODO: array: provider_ids - cmd.Flags().Var(&listReq.SortBy, "sort-by", `Criteria for sorting the resulting set of listings. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) // TODO: array: tags cmd.Use = "list" @@ -249,13 +244,11 @@ func newSearch() *cobra.Command { // TODO: array: assets // TODO: array: categories - cmd.Flags().BoolVar(&searchReq.IsAscending, "is-ascending", searchReq.IsAscending, ``) cmd.Flags().BoolVar(&searchReq.IsFree, "is-free", searchReq.IsFree, ``) cmd.Flags().BoolVar(&searchReq.IsPrivateExchange, "is-private-exchange", searchReq.IsPrivateExchange, ``) cmd.Flags().IntVar(&searchReq.PageSize, "page-size", searchReq.PageSize, ``) cmd.Flags().StringVar(&searchReq.PageToken, "page-token", searchReq.PageToken, ``) // TODO: array: provider_ids - cmd.Flags().Var(&searchReq.SortBy, "sort-by", `. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) cmd.Use = "search QUERY" cmd.Short = `Search listings.` diff --git a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go index c55ca4ee1..8b0af3cc6 100755 --- a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go +++ b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go index 579a89516..ab84249e9 100755 --- a/cmd/workspace/consumer-providers/consumer-providers.go +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -24,9 +24,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/data-sources/data-sources.go b/cmd/workspace/data-sources/data-sources.go index f310fe50a..9f8a9dcd7 100755 --- a/cmd/workspace/data-sources/data-sources.go +++ b/cmd/workspace/data-sources/data-sources.go @@ -27,10 +27,10 @@ func New() *cobra.Command { grep to search the response from this API for the name of your SQL warehouse as it appears in Databricks SQL. - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] + **Note**: A new version of the Databricks SQL API is now available. 
[Learn + more] - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -67,10 +67,10 @@ func newList() *cobra.Command { fields that appear in this API response are enumerated for clarity. However, you need only a SQL warehouse's id to create new queries against it. - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:warehouses/list instead. [Learn more] - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/genie/genie.go b/cmd/workspace/genie/genie.go new file mode 100755 index 000000000..e4a059091 --- /dev/null +++ b/cmd/workspace/genie/genie.go @@ -0,0 +1,437 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package genie + +import ( + "fmt" + "time" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/dashboards" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "genie", + Short: `Genie provides a no-code experience for business users, powered by AI/BI.`, + Long: `Genie provides a no-code experience for business users, powered by AI/BI. + Analysts set up spaces that business users can use to ask questions using + natural language. Genie uses data registered to Unity Catalog and requires at + least CAN USE permission on a Pro or Serverless SQL warehouse. Also, + Databricks Assistant must be enabled.`, + GroupID: "dashboards", + Annotations: map[string]string{ + "package": "dashboards", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreateMessage()) + cmd.AddCommand(newExecuteMessageQuery()) + cmd.AddCommand(newGetMessage()) + cmd.AddCommand(newGetMessageQueryResult()) + cmd.AddCommand(newStartConversation()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create-message command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
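// The create-message command declared below, together with start-conversation further down,
// wraps a simple conversational flow in the Go SDK: start a conversation, wait for the answer,
// then post follow-up messages into it. A minimal sketch of that flow called directly, assuming
// a configured *databricks.WorkspaceClient `w` and a context `ctx`; the space ID, message text,
// timeout, and the ConversationId field on the returned message are illustrative assumptions:
//
//	wait, err := w.Genie.StartConversation(ctx, dashboards.GenieStartConversationMessageRequest{
//		SpaceId: "my-space-id", // illustrative Genie space ID
//		Content: "Which customers grew the fastest last quarter?",
//	})
//	if err != nil {
//		return err
//	}
//	msg, err := wait.GetWithTimeout(20 * time.Minute) // block until the message completes
//	if err != nil {
//		return err
//	}
//	_, err = w.Genie.CreateMessage(ctx, dashboards.GenieCreateConversationMessageRequest{
//		SpaceId:        "my-space-id",
//		ConversationId: msg.ConversationId, // conversation started above (field name assumed)
//		Content:        "Break that down by region.",
//	})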
+var createMessageOverrides []func( + *cobra.Command, + *dashboards.GenieCreateConversationMessageRequest, +) + +func newCreateMessage() *cobra.Command { + cmd := &cobra.Command{} + + var createMessageReq dashboards.GenieCreateConversationMessageRequest + var createMessageJson flags.JsonFlag + + var createMessageSkipWait bool + var createMessageTimeout time.Duration + + cmd.Flags().BoolVar(&createMessageSkipWait, "no-wait", createMessageSkipWait, `do not wait to reach COMPLETED state`) + cmd.Flags().DurationVar(&createMessageTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach COMPLETED state`) + // TODO: short flags + cmd.Flags().Var(&createMessageJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-message SPACE_ID CONVERSATION_ID CONTENT" + cmd.Short = `Create conversation message.` + cmd.Long = `Create conversation message. + + Create new message in [conversation](:method:genie/startconversation). The AI + response uses all previously created messages in the conversation to respond. + + Arguments: + SPACE_ID: The ID associated with the Genie space where the conversation is started. + CONVERSATION_ID: The ID associated with the conversation. + CONTENT: User message content.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only SPACE_ID, CONVERSATION_ID as positional arguments. Provide 'content' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createMessageJson.Unmarshal(&createMessageReq) + if err != nil { + return err + } + } + createMessageReq.SpaceId = args[0] + createMessageReq.ConversationId = args[1] + if !cmd.Flags().Changed("json") { + createMessageReq.Content = args[2] + } + + wait, err := w.Genie.CreateMessage(ctx, createMessageReq) + if err != nil { + return err + } + if createMessageSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *dashboards.GenieMessage) { + status := i.Status + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(createMessageTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createMessageOverrides { + fn(cmd, &createMessageReq) + } + + return cmd +} + +// start execute-message-query command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
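// The create-message command above shows the pattern used for long-running Genie calls: with
// --no-wait the raw response is rendered immediately, otherwise the returned wait handle is
// polled until the message reaches COMPLETED, bounded by --timeout. A minimal sketch of the
// same helpers used directly, assuming `wait` came from w.Genie.CreateMessage and that printing
// the status is an acceptable stand-in for the CLI spinner:
//
//	msg, err := wait.OnProgress(func(m *dashboards.GenieMessage) {
//		fmt.Printf("current status: %s\n", m.Status)
//	}).GetWithTimeout(20 * time.Minute)
//	if err != nil {
//		return err
//	}
//	// msg now holds the final GenieMessage.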
+var executeMessageQueryOverrides []func( + *cobra.Command, + *dashboards.ExecuteMessageQueryRequest, +) + +func newExecuteMessageQuery() *cobra.Command { + cmd := &cobra.Command{} + + var executeMessageQueryReq dashboards.ExecuteMessageQueryRequest + + // TODO: short flags + + cmd.Use = "execute-message-query SPACE_ID CONVERSATION_ID MESSAGE_ID" + cmd.Short = `Execute SQL query in a conversation message.` + cmd.Long = `Execute SQL query in a conversation message. + + Execute the SQL query in the message. + + Arguments: + SPACE_ID: Genie space ID + CONVERSATION_ID: Conversation ID + MESSAGE_ID: Message ID` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + executeMessageQueryReq.SpaceId = args[0] + executeMessageQueryReq.ConversationId = args[1] + executeMessageQueryReq.MessageId = args[2] + + response, err := w.Genie.ExecuteMessageQuery(ctx, executeMessageQueryReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range executeMessageQueryOverrides { + fn(cmd, &executeMessageQueryReq) + } + + return cmd +} + +// start get-message command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getMessageOverrides []func( + *cobra.Command, + *dashboards.GenieGetConversationMessageRequest, +) + +func newGetMessage() *cobra.Command { + cmd := &cobra.Command{} + + var getMessageReq dashboards.GenieGetConversationMessageRequest + + // TODO: short flags + + cmd.Use = "get-message SPACE_ID CONVERSATION_ID MESSAGE_ID" + cmd.Short = `Get conversation message.` + cmd.Long = `Get conversation message. + + Get message from conversation. + + Arguments: + SPACE_ID: The ID associated with the Genie space where the target conversation is + located. + CONVERSATION_ID: The ID associated with the target conversation. + MESSAGE_ID: The ID associated with the target message from the identified + conversation.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getMessageReq.SpaceId = args[0] + getMessageReq.ConversationId = args[1] + getMessageReq.MessageId = args[2] + + response, err := w.Genie.GetMessage(ctx, getMessageReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getMessageOverrides { + fn(cmd, &getMessageReq) + } + + return cmd +} + +// start get-message-query-result command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var getMessageQueryResultOverrides []func( + *cobra.Command, + *dashboards.GenieGetMessageQueryResultRequest, +) + +func newGetMessageQueryResult() *cobra.Command { + cmd := &cobra.Command{} + + var getMessageQueryResultReq dashboards.GenieGetMessageQueryResultRequest + + // TODO: short flags + + cmd.Use = "get-message-query-result SPACE_ID CONVERSATION_ID MESSAGE_ID" + cmd.Short = `Get conversation message SQL query result.` + cmd.Long = `Get conversation message SQL query result. + + Get the result of SQL query if the message has a query attachment. This is + only available if a message has a query attachment and the message status is + EXECUTING_QUERY. + + Arguments: + SPACE_ID: Genie space ID + CONVERSATION_ID: Conversation ID + MESSAGE_ID: Message ID` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getMessageQueryResultReq.SpaceId = args[0] + getMessageQueryResultReq.ConversationId = args[1] + getMessageQueryResultReq.MessageId = args[2] + + response, err := w.Genie.GetMessageQueryResult(ctx, getMessageQueryResultReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getMessageQueryResultOverrides { + fn(cmd, &getMessageQueryResultReq) + } + + return cmd +} + +// start start-conversation command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var startConversationOverrides []func( + *cobra.Command, + *dashboards.GenieStartConversationMessageRequest, +) + +func newStartConversation() *cobra.Command { + cmd := &cobra.Command{} + + var startConversationReq dashboards.GenieStartConversationMessageRequest + var startConversationJson flags.JsonFlag + + var startConversationSkipWait bool + var startConversationTimeout time.Duration + + cmd.Flags().BoolVar(&startConversationSkipWait, "no-wait", startConversationSkipWait, `do not wait to reach COMPLETED state`) + cmd.Flags().DurationVar(&startConversationTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach COMPLETED state`) + // TODO: short flags + cmd.Flags().Var(&startConversationJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "start-conversation SPACE_ID CONTENT" + cmd.Short = `Start conversation.` + cmd.Long = `Start conversation. + + Start a new conversation. + + Arguments: + SPACE_ID: The ID associated with the Genie space where you want to start a + conversation. + CONTENT: The text of the message that starts the conversation.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only SPACE_ID as positional arguments. 
Provide 'content' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = startConversationJson.Unmarshal(&startConversationReq) + if err != nil { + return err + } + } + startConversationReq.SpaceId = args[0] + if !cmd.Flags().Changed("json") { + startConversationReq.Content = args[1] + } + + wait, err := w.Genie.StartConversation(ctx, startConversationReq) + if err != nil { + return err + } + if startConversationSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *dashboards.GenieMessage) { + status := i.Status + statusMessage := fmt.Sprintf("current status: %s", status) + spinner <- statusMessage + }).GetWithTimeout(startConversationTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range startConversationOverrides { + fn(cmd, &startConversationReq) + } + + return cmd +} + +// end service Genie diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index d8a4dec4f..98e474d33 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -68,5 +68,9 @@ func Groups() []cobra.Group { ID: "marketplace", Title: "Marketplace", }, + { + ID: "apps", + Title: "Apps", + }, } } diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 50a045921..2d422fa8c 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -817,6 +817,7 @@ func newGetRun() *cobra.Command { cmd.Flags().BoolVar(&getRunReq.IncludeHistory, "include-history", getRunReq.IncludeHistory, `Whether to include the repair history in the response.`) cmd.Flags().BoolVar(&getRunReq.IncludeResolvedValues, "include-resolved-values", getRunReq.IncludeResolvedValues, `Whether to include resolved parameter values in the response.`) + cmd.Flags().StringVar(&getRunReq.PageToken, "page-token", getRunReq.PageToken, `To list the next page or the previous page of job tasks, set this field to the value of the next_page_token or prev_page_token returned in the GetJob response.`) cmd.Use = "get-run RUN_ID" cmd.Short = `Get a single job run.` diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 36eab0e7f..ef2d6845b 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -666,7 +666,7 @@ func newList() *cobra.Command { cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`) cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`) - cmd.Flags().Var(&listReq.View, "view", `Indicates whether to include all metadata from the dashboard in the response. Supported values: [DASHBOARD_VIEW_BASIC, DASHBOARD_VIEW_FULL]`) + cmd.Flags().Var(&listReq.View, "view", `DASHBOARD_VIEW_BASIConly includes summary metadata from the dashboard. 
Supported values: [DASHBOARD_VIEW_BASIC]`) cmd.Use = "list" cmd.Short = `List dashboards.` diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 034cea2df..d2f054045 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -133,6 +133,7 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeAliases, "include-aliases", getReq.IncludeAliases, `Whether to include aliases associated with the model version in the response.`) cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include model versions in the response for which the principal can only access selective metadata for.`) cmd.Use = "get FULL_NAME VERSION" @@ -203,6 +204,8 @@ func newGetByAlias() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getByAliasReq.IncludeAliases, "include-aliases", getByAliasReq.IncludeAliases, `Whether to include aliases associated with the model version in the response.`) + cmd.Use = "get-by-alias FULL_NAME ALIAS" cmd.Short = `Get Model Version By Alias.` cmd.Long = `Get Model Version By Alias. diff --git a/cmd/workspace/notification-destinations/notification-destinations.go b/cmd/workspace/notification-destinations/notification-destinations.go new file mode 100755 index 000000000..5ad47cc95 --- /dev/null +++ b/cmd/workspace/notification-destinations/notification-destinations.go @@ -0,0 +1,342 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package notification_destinations + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "notification-destinations", + Short: `The notification destinations API lets you programmatically manage a workspace's notification destinations.`, + Long: `The notification destinations API lets you programmatically manage a + workspace's notification destinations. Notification destinations are used to + send notifications for query alerts and jobs to destinations outside of + Databricks. Only workspace admins can create, update, and delete notification + destinations.`, + GroupID: "settings", + Annotations: map[string]string{ + "package": "settings", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
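// The create command declared below maps to NotificationDestinations.Create in the SDK. A
// minimal sketch of creating a destination directly, assuming a configured workspace client
// `w` and context `ctx`; the display name is illustrative, and the destination-specific
// config is the complex field the CLI takes via --json rather than individual flags:
//
//	dest, err := w.NotificationDestinations.Create(ctx, settings.CreateNotificationDestinationRequest{
//		DisplayName: "oncall-email", // illustrative display name
//		// Config: ...              // destination settings; supplied with --json in the CLI
//	})
//	if err != nil {
//		return err
//	}
//	_ = dest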
+var createOverrides []func( + *cobra.Command, + *settings.CreateNotificationDestinationRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq settings.CreateNotificationDestinationRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: config + cmd.Flags().StringVar(&createReq.DisplayName, "display-name", createReq.DisplayName, `The display name for the notification destination.`) + + cmd.Use = "create" + cmd.Short = `Create a notification destination.` + cmd.Long = `Create a notification destination. + + Creates a notification destination. Requires workspace admin permissions.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + + response, err := w.NotificationDestinations.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteNotificationDestinationRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteNotificationDestinationRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete a notification destination.` + cmd.Long = `Delete a notification destination. + + Deletes a notification destination. Requires workspace admin permissions.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.NotificationDestinations.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
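// Once created, destinations are addressed by ID, as in the delete command above and the get
// command declared below. A minimal sketch of deleting one via the SDK, assuming a configured
// workspace client; the ID is an illustrative placeholder for a value shown by list:
//
//	err := w.NotificationDestinations.Delete(ctx, settings.DeleteNotificationDestinationRequest{
//		Id: "0123-abcd", // illustrative destination ID
//	})
//	if err != nil {
//		return err
//	}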
+var getOverrides []func( + *cobra.Command, + *settings.GetNotificationDestinationRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetNotificationDestinationRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a notification destination.` + cmd.Long = `Get a notification destination. + + Gets a notification destination.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Id = args[0] + + response, err := w.NotificationDestinations.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *settings.ListNotificationDestinationsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq settings.ListNotificationDestinationsRequest + + // TODO: short flags + + cmd.Flags().Int64Var(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List notification destinations.` + cmd.Long = `List notification destinations. + + Lists notification destinations.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.NotificationDestinations.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
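// The list command above returns a paginated iterator that the CLI renders with
// cmdio.RenderIterator. A minimal sketch of walking the same iterator from Go, assuming the
// SDK's standard HasNext/Next iterator interface and a configured workspace client:
//
//	it := w.NotificationDestinations.List(ctx, settings.ListNotificationDestinationsRequest{})
//	for it.HasNext(ctx) {
//		dest, err := it.Next(ctx)
//		if err != nil {
//			return err
//		}
//		fmt.Printf("%+v\n", dest)
//	}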
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateNotificationDestinationRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateNotificationDestinationRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: config + cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name for the notification destination.`) + + cmd.Use = "update ID" + cmd.Short = `Update a notification destination.` + cmd.Long = `Update a notification destination. + + Updates a notification destination. Requires workspace admin permissions. At + least one field is required in the request body.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.Id = args[0] + + response, err := w.NotificationDestinations.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service NotificationDestinations diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go index 40d3f9a3b..2e50b1231 100755 --- a/cmd/workspace/permission-migration/permission-migration.go +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -19,9 +19,9 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "permission-migration", - Short: `This spec contains undocumented permission migration APIs used in https://github.com/databrickslabs/ucx.`, - Long: `This spec contains undocumented permission migration APIs used in - https://github.com/databrickslabs/ucx.`, + Short: `APIs for migrating acl permissions, used only by the ucx tool: https://github.com/databrickslabs/ucx.`, + Long: `APIs for migrating acl permissions, used only by the ucx tool: + https://github.com/databrickslabs/ucx`, GroupID: "iam", Annotations: map[string]string{ "package": "iam", @@ -48,13 +48,13 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var migratePermissionsOverrides []func( *cobra.Command, - *iam.PermissionMigrationRequest, + *iam.MigratePermissionsRequest, ) func newMigratePermissions() *cobra.Command { cmd := &cobra.Command{} - var migratePermissionsReq iam.PermissionMigrationRequest + var migratePermissionsReq iam.MigratePermissionsRequest var migratePermissionsJson flags.JsonFlag // TODO: short flags @@ -65,14 +65,10 @@ func newMigratePermissions() *cobra.Command { cmd.Use = "migrate-permissions WORKSPACE_ID FROM_WORKSPACE_GROUP_NAME TO_ACCOUNT_GROUP_NAME" cmd.Short = `Migrate Permissions.` cmd.Long = `Migrate Permissions. 
- - Migrate a batch of permissions from a workspace local group to an account - group. Arguments: WORKSPACE_ID: WorkspaceId of the associated workspace where the permission migration - will occur. Both workspace group and account group must be in this - workspace. + will occur. FROM_WORKSPACE_GROUP_NAME: The name of the workspace group that permissions will be migrated from. TO_ACCOUNT_GROUP_NAME: The name of the account group that permissions will be migrated to.` diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 57a7d1e5e..fd9c1a468 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -21,6 +21,9 @@ func New() *cobra.Command { Long: `Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints. + * **[Apps permissions](:service:apps)** — Manage which users can manage or + use apps. + * **[Cluster permissions](:service:clusters)** — Manage which users can manage, restart, or attach to clusters. @@ -59,7 +62,8 @@ func New() *cobra.Command { create or use tokens. * **[Workspace object permissions](:service:workspace)** — Manage which - users can read, run, edit, or manage directories, files, and notebooks. + users can read, run, edit, or manage alerts, dbsql-dashboards, directories, + files, notebooks and queries. For the mapping of the required permissions for specific actions or abilities and other important information, see [Access Control]. @@ -112,10 +116,10 @@ func newGet() *cobra.Command { parent objects or root object. Arguments: - REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: - authorization, clusters, cluster-policies, directories, experiments, - files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or warehouses. + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dbsql-dashboards, directories, + experiments, files, instance-pools, jobs, notebooks, pipelines, queries, + registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) @@ -240,10 +244,10 @@ func newSet() *cobra.Command { parent objects or root object. Arguments: - REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: - authorization, clusters, cluster-policies, directories, experiments, - files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or warehouses. + REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: alerts, + authorization, clusters, cluster-policies, dbsql-dashboards, directories, + experiments, files, instance-pools, jobs, notebooks, pipelines, queries, + registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) @@ -314,10 +318,10 @@ func newUpdate() *cobra.Command { their parent objects or root object. Arguments: - REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: - authorization, clusters, cluster-policies, directories, experiments, - files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or warehouses. + REQUEST_OBJECT_TYPE: The type of the request object. 
Can be one of the following: alerts, + authorization, clusters, cluster-policies, dbsql-dashboards, directories, + experiments, files, instance-pools, jobs, notebooks, pipelines, queries, + registered-models, repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index beee6e963..cac23405b 100755 --- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -60,11 +60,17 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().Int64Var(&getReq.Version, "version", getReq.Version, `The version number for the family to fetch.`) + cmd.Use = "get POLICY_FAMILY_ID" cmd.Short = `Get policy family information.` cmd.Long = `Get policy family information. - Retrieve the information for an policy family based on its identifier.` + Retrieve the information for an policy family based on its identifier and + version + + Arguments: + POLICY_FAMILY_ID: The family ID about which to retrieve information.` cmd.Annotations = make(map[string]string) @@ -115,14 +121,15 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().Int64Var(&listReq.MaxResults, "max-results", listReq.MaxResults, `The max number of policy families to return.`) + cmd.Flags().Int64Var(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of policy families to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) cmd.Use = "list" cmd.Short = `List policy families.` cmd.Long = `List policy families. - Retrieve a list of policy families. This API is paginated.` + Returns the list of policy definition types available to use at their latest + version. This API is paginated.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go index 4ab36b5d0..a3f746214 100755 --- a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go +++ b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go @@ -25,9 +25,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go index 7ff73e0d1..b92403755 100755 --- a/cmd/workspace/provider-exchanges/provider-exchanges.go +++ b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-files/provider-files.go b/cmd/workspace/provider-files/provider-files.go index 25e1addf5..62dcb6de9 100755 --- a/cmd/workspace/provider-files/provider-files.go +++ b/cmd/workspace/provider-files/provider-files.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. 
- Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-listings/provider-listings.go b/cmd/workspace/provider-listings/provider-listings.go index 0abdf51d8..18c99c53d 100755 --- a/cmd/workspace/provider-listings/provider-listings.go +++ b/cmd/workspace/provider-listings/provider-listings.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go index a38d9f420..d18e2e578 100755 --- a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -26,9 +26,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go index 8cee6e4eb..bb3ca9666 100755 --- a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go +++ b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go @@ -23,9 +23,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/provider-providers/provider-providers.go b/cmd/workspace/provider-providers/provider-providers.go index b7273a344..94d12d6f0 100755 --- a/cmd/workspace/provider-providers/provider-providers.go +++ b/cmd/workspace/provider-providers/provider-providers.go @@ -25,9 +25,6 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "marketplace", }, - - // This service is being previewed; hide from help output. - Hidden: true, } // Add methods diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 7305191c8..af2737a0f 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -291,6 +291,8 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.DataProviderGlobalMetastoreId, "data-provider-global-metastore-id", listReq.DataProviderGlobalMetastoreId, `If not provided, all providers will be returned.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of providers to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list" cmd.Short = `List providers.` @@ -345,6 +347,9 @@ func newListShares() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&listSharesReq.MaxResults, "max-results", listSharesReq.MaxResults, `Maximum number of shares to return.`) + cmd.Flags().StringVar(&listSharesReq.PageToken, "page-token", listSharesReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list-shares NAME" cmd.Short = `List shares by Provider.` cmd.Long = `List shares by Provider. 
diff --git a/cmd/workspace/queries-legacy/queries-legacy.go b/cmd/workspace/queries-legacy/queries-legacy.go new file mode 100755 index 000000000..fa78bb2b0 --- /dev/null +++ b/cmd/workspace/queries-legacy/queries-legacy.go @@ -0,0 +1,500 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package queries_legacy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "queries-legacy", + Short: `These endpoints are used for CRUD operations on query definitions.`, + Long: `These endpoints are used for CRUD operations on query definitions. Query + definitions include the target SQL warehouse, query text, name, description, + tags, parameters, and visualizations. Queries can be scheduled using the + sql_task type of the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API is now available. Please see + the latest version. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html`, + GroupID: "sql", + Annotations: map[string]string{ + "package": "sql", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRestore()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *sql.QueryPostContent, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq sql.QueryPostContent + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a new query definition.` + cmd.Long = `Create a new query definition. + + Creates a new query definition. Queries created with this endpoint belong to + the authenticated user making the request. + + The data_source_id field specifies the ID of the SQL warehouse to run this + query against. You can use the Data Sources API to see a complete list of + available SQL warehouses. Or you can copy the data_source_id from an + existing query. + + **Note**: You cannot add a visualization until you create the query. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queries/create instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.QueriesLegacy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *sql.DeleteQueriesLegacyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq sql.DeleteQueriesLegacyRequest + + // TODO: short flags + + cmd.Use = "delete QUERY_ID" + cmd.Short = `Delete a query.` + cmd.Long = `Delete a query. + + Moves a query to the trash. Trashed queries immediately disappear from + searches and list views, and they cannot be used for alerts. The trash is + deleted after 30 days. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queries/delete instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries Legacy drop-down." + names, err := w.QueriesLegacy.LegacyQueryNameToIdMap(ctx, sql.ListQueriesLegacyRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Queries Legacy drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.QueryId = args[0] + + err = w.QueriesLegacy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *sql.GetQueriesLegacyRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq sql.GetQueriesLegacyRequest + + // TODO: short flags + + cmd.Use = "get QUERY_ID" + cmd.Short = `Get a query definition.` + cmd.Long = `Get a query definition. + + Retrieve a query object definition along with contextual permissions + information about the currently authenticated user. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queries/get instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries Legacy drop-down." + names, err := w.QueriesLegacy.LegacyQueryNameToIdMap(ctx, sql.ListQueriesLegacyRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Queries Legacy drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.QueryId = args[0] + + response, err := w.QueriesLegacy.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *sql.ListQueriesLegacyRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq sql.ListQueriesLegacyRequest + + // TODO: short flags + + cmd.Flags().StringVar(&listReq.Order, "order", listReq.Order, `Name of query attribute to order by.`) + cmd.Flags().IntVar(&listReq.Page, "page", listReq.Page, `Page number to retrieve.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Number of queries to return per page.`) + cmd.Flags().StringVar(&listReq.Q, "q", listReq.Q, `Full text search term.`) + + cmd.Use = "list" + cmd.Short = `Get a list of queries.` + cmd.Long = `Get a list of queries. + + Gets a list of queries. Optionally, this list can be filtered by a search + term. + + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queries/list instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.QueriesLegacy.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start restore command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var restoreOverrides []func( + *cobra.Command, + *sql.RestoreQueriesLegacyRequest, +) + +func newRestore() *cobra.Command { + cmd := &cobra.Command{} + + var restoreReq sql.RestoreQueriesLegacyRequest + + // TODO: short flags + + cmd.Use = "restore QUERY_ID" + cmd.Short = `Restore a query.` + cmd.Long = `Restore a query. + + Restore a query that has been moved to the trash. A restored query appears in + list views and searches. You can use restored queries for alerts. + + **Note**: A new version of the Databricks SQL API is now available. Please see + the latest version. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries Legacy drop-down." + names, err := w.QueriesLegacy.LegacyQueryNameToIdMap(ctx, sql.ListQueriesLegacyRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Queries Legacy drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + restoreReq.QueryId = args[0] + + err = w.QueriesLegacy.Restore(ctx, restoreReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range restoreOverrides { + fn(cmd, &restoreReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
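The override-hook comment above describes the single extension point these generated files expose: a package-level slice of functions that manually curated files in the same directory append to from init(), and that the constructor applies after the command is built. A minimal sketch of how such a curated override file might look, using a hypothetical request type and a purely illustrative tweak (not taken from this patch):

package queries_legacy

import "github.com/spf13/cobra"

// exampleReq stands in for the generated request struct; the real hooks
// receive the corresponding sql.*Request type from the SDK.
type exampleReq struct{ QueryId string }

// exampleOverrides mirrors the shape of the generated hook slices.
var exampleOverrides []func(*cobra.Command, *exampleReq)

func init() {
	// Register a hook; the generated constructor ranges over the slice and
	// calls each hook with the command and its request struct, so curated
	// code can adjust flags or aliases without touching generated files.
	exampleOverrides = append(exampleOverrides, func(cmd *cobra.Command, req *exampleReq) {
		cmd.Aliases = append(cmd.Aliases, "show")
	})
}

In the generated constructors this shows up as the final loop over the hooks, e.g. for _, fn := range updateOverrides { fn(cmd, &updateReq) }.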
+var updateOverrides []func( + *cobra.Command, + *sql.QueryEditContent, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.QueryEditContent + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.DataSourceId, "data-source-id", updateReq.DataSourceId, `Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.`) + cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `General description that conveys additional information about this query such as usage notes.`) + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`) + // TODO: any: options + cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`) + cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) + // TODO: array: tags + + cmd.Use = "update QUERY_ID" + cmd.Short = `Change a query definition.` + cmd.Long = `Change a query definition. + + Modify this query definition. + + **Note**: You cannot undo this operation. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queries/update instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries Legacy drop-down." + names, err := w.QueriesLegacy.LegacyQueryNameToIdMap(ctx, sql.ListQueriesLegacyRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Queries Legacy drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + updateReq.QueryId = args[0] + + response, err := w.QueriesLegacy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service QueriesLegacy diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index 650131974..fea01451a 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -19,16 +19,11 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "queries", - Short: `These endpoints are used for CRUD operations on query definitions.`, - Long: `These endpoints are used for CRUD operations on query definitions. 
Query - definitions include the target SQL warehouse, query text, name, description, - tags, parameters, and visualizations. Queries can be scheduled using the - sql_task type of the Jobs API, e.g. :method:jobs/create. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, + Short: `The queries API can be used to perform CRUD operations on queries.`, + Long: `The queries API can be used to perform CRUD operations on queries. A query is + a Databricks SQL object that includes the target SQL warehouse, query text, + name, description, tags, and parameters. Queries can be scheduled using the + sql_task type of the Jobs API, e.g. :method:jobs/create.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -40,7 +35,7 @@ func New() *cobra.Command { cmd.AddCommand(newDelete()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) - cmd.AddCommand(newRestore()) + cmd.AddCommand(newListVisualizations()) cmd.AddCommand(newUpdate()) // Apply optional overrides to this command. @@ -57,39 +52,33 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *sql.QueryPostContent, + *sql.CreateQueryRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq sql.QueryPostContent + var createReq sql.CreateQueryRequest var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: query + cmd.Use = "create" - cmd.Short = `Create a new query definition.` - cmd.Long = `Create a new query definition. + cmd.Short = `Create a query.` + cmd.Long = `Create a query. - Creates a new query definition. Queries created with this endpoint belong to - the authenticated user making the request. - - The data_source_id field specifies the ID of the SQL warehouse to run this - query against. You can use the Data Sources API to see a complete list of - available SQL warehouses. Or you can copy the data_source_id from an - existing query. - - **Note**: You cannot add a visualization until you create the query. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Creates a query.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -100,8 +89,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := w.Queries.Create(ctx, createReq) @@ -129,28 +116,24 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. 
var deleteOverrides []func( *cobra.Command, - *sql.DeleteQueryRequest, + *sql.TrashQueryRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq sql.DeleteQueryRequest + var deleteReq sql.TrashQueryRequest // TODO: short flags - cmd.Use = "delete QUERY_ID" + cmd.Use = "delete ID" cmd.Short = `Delete a query.` cmd.Long = `Delete a query. Moves a query to the trash. Trashed queries immediately disappear from - searches and list views, and they cannot be used for alerts. The trash is - deleted after 30 days. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + searches and list views, and cannot be used for alerts. You can restore a + trashed query through the UI. A trashed query is permanently deleted after 30 + days.` cmd.Annotations = make(map[string]string) @@ -161,8 +144,8 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down." - names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{}) + promptSpinner <- "No ID argument specified. Loading names for Queries drop-down." + names, err := w.Queries.ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx, sql.ListQueriesRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err) @@ -176,7 +159,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have ") } - deleteReq.QueryId = args[0] + deleteReq.Id = args[0] err = w.Queries.Delete(ctx, deleteReq) if err != nil { @@ -213,17 +196,11 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get QUERY_ID" - cmd.Short = `Get a query definition.` - cmd.Long = `Get a query definition. + cmd.Use = "get ID" + cmd.Short = `Get a query.` + cmd.Long = `Get a query. - Retrieve a query object definition along with contextual permissions - information about the currently authenticated user. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Gets a query.` cmd.Annotations = make(map[string]string) @@ -234,8 +211,8 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down." - names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{}) + promptSpinner <- "No ID argument specified. Loading names for Queries drop-down." + names, err := w.Queries.ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx, sql.ListQueriesRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. 
Original error: %w", err) @@ -249,7 +226,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have ") } - getReq.QueryId = args[0] + getReq.Id = args[0] response, err := w.Queries.Get(ctx, getReq) if err != nil { @@ -286,25 +263,16 @@ func newList() *cobra.Command { // TODO: short flags - cmd.Flags().StringVar(&listReq.Order, "order", listReq.Order, `Name of query attribute to order by.`) - cmd.Flags().IntVar(&listReq.Page, "page", listReq.Page, `Page number to retrieve.`) - cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Number of queries to return per page.`) - cmd.Flags().StringVar(&listReq.Q, "q", listReq.Q, `Full text search term.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) cmd.Use = "list" - cmd.Short = `Get a list of queries.` - cmd.Long = `Get a list of queries. + cmd.Short = `List queries.` + cmd.Long = `List queries. - Gets a list of queries. Optionally, this list can be filtered by a search - term. - - **Warning**: Calling this API concurrently 10 or more times could result in - throttling, service degradation, or a temporary ban. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Gets a list of queries accessible to the user, ordered by creation time. + **Warning:** Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.` cmd.Annotations = make(map[string]string) @@ -334,33 +302,33 @@ func newList() *cobra.Command { return cmd } -// start restore command +// start list-visualizations command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var restoreOverrides []func( +var listVisualizationsOverrides []func( *cobra.Command, - *sql.RestoreQueryRequest, + *sql.ListVisualizationsForQueryRequest, ) -func newRestore() *cobra.Command { +func newListVisualizations() *cobra.Command { cmd := &cobra.Command{} - var restoreReq sql.RestoreQueryRequest + var listVisualizationsReq sql.ListVisualizationsForQueryRequest // TODO: short flags - cmd.Use = "restore QUERY_ID" - cmd.Short = `Restore a query.` - cmd.Long = `Restore a query. + cmd.Flags().IntVar(&listVisualizationsReq.PageSize, "page-size", listVisualizationsReq.PageSize, ``) + cmd.Flags().StringVar(&listVisualizationsReq.PageToken, "page-token", listVisualizationsReq.PageToken, ``) + + cmd.Use = "list-visualizations ID" + cmd.Short = `List visualizations on a query.` + cmd.Long = `List visualizations on a query. - Restore a query that has been moved to the trash. A restored query appears in - list views and searches. You can use restored queries for alerts. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Gets a list of visualizations on a query.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true cmd.Annotations = make(map[string]string) @@ -371,8 +339,8 @@ func newRestore() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down." - names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{}) + promptSpinner <- "No ID argument specified. Loading names for Queries drop-down." + names, err := w.Queries.ListQueryObjectsResponseQueryDisplayNameToIdMap(ctx, sql.ListQueriesRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err) @@ -386,13 +354,10 @@ func newRestore() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have ") } - restoreReq.QueryId = args[0] + listVisualizationsReq.Id = args[0] - err = w.Queries.Restore(ctx, restoreReq) - if err != nil { - return err - } - return nil + response := w.Queries.ListVisualizations(ctx, listVisualizationsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -400,8 +365,8 @@ func newRestore() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range restoreOverrides { - fn(cmd, &restoreReq) + for _, fn := range listVisualizationsOverrides { + fn(cmd, &listVisualizationsReq) } return cmd @@ -413,41 +378,47 @@ func newRestore() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *sql.QueryEditContent, + *sql.UpdateQueryRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq sql.QueryEditContent + var updateReq sql.UpdateQueryRequest var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.DataSourceId, "data-source-id", updateReq.DataSourceId, `Data source ID maps to the ID of the data source used by the resource and is distinct from the warehouse ID.`) - cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `General description that conveys additional information about this query such as usage notes.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`) - // TODO: any: options - cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`) - cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) - // TODO: array: tags + // TODO: complex arg: query - cmd.Use = "update QUERY_ID" - cmd.Short = `Change a query definition.` - cmd.Long = `Change a query definition. + cmd.Use = "update ID UPDATE_MASK" + cmd.Short = `Update a query.` + cmd.Long = `Update a query. - Modify this query definition. - - **Note**: You cannot undo this operation. - - **Note**: A new version of the Databricks SQL API will soon be available. - [Learn more] - - [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` + Updates a query. + + Arguments: + ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. 
Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only ID as positional arguments. Provide 'update_mask' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -459,24 +430,10 @@ func newUpdate() *cobra.Command { return err } } - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No QUERY_ID argument specified. Loading names for Queries drop-down." - names, err := w.Queries.QueryNameToIdMap(ctx, sql.ListQueriesRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Queries drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "") - if err != nil { - return err - } - args = append(args, id) + updateReq.Id = args[0] + if !cmd.Flags().Changed("json") { + updateReq.UpdateMask = args[1] } - if len(args) != 1 { - return fmt.Errorf("expected to have ") - } - updateReq.QueryId = args[0] response, err := w.Queries.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 60d6004d9..5155b5cc0 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -15,9 +15,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "query-history", - Short: `Access the history of queries through SQL warehouses.`, - Long: `Access the history of queries through SQL warehouses.`, + Use: "query-history", + Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`, + Long: `A service responsible for storing and retrieving the list of queries run + against SQL endpoints, serverless compute, and DLT.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -52,7 +53,6 @@ func newList() *cobra.Command { // TODO: short flags // TODO: complex arg: filter_by - cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include metrics about query.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) @@ -60,9 +60,13 @@ func newList() *cobra.Command { cmd.Short = `List Queries.` cmd.Long = `List Queries. - List the history of queries through SQL warehouses. + List the history of queries through SQL warehouses, serverless compute, and + DLT. - You can filter by user ID, warehouse ID, status, and time range.` + You can filter by user ID, warehouse ID, status, and time range. Most recently + started queries are returned first (up to max_results in request). 
The + pagination token returned in response can be used to list subsequent query + statuses.` cmd.Annotations = make(map[string]string) @@ -76,8 +80,11 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response := w.QueryHistory.List(ctx, listReq) - return cmdio.RenderIterator(ctx, response) + response, err := w.QueryHistory.List(ctx, listReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. diff --git a/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go b/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go new file mode 100755 index 000000000..4f45ab23e --- /dev/null +++ b/cmd/workspace/query-visualizations-legacy/query-visualizations-legacy.go @@ -0,0 +1,253 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package query_visualizations_legacy + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/sql" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "query-visualizations-legacy", + Short: `This is an evolving API that facilitates the addition and removal of vizualisations from existing queries within the Databricks Workspace.`, + Long: `This is an evolving API that facilitates the addition and removal of + vizualisations from existing queries within the Databricks Workspace. Data + structures may change over time. + + **Note**: A new version of the Databricks SQL API is now available. Please see + the latest version. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html`, + GroupID: "sql", + Annotations: map[string]string{ + "package": "sql", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *sql.CreateQueryVisualizationsLegacyRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq sql.CreateQueryVisualizationsLegacyRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Add visualization to a query.` + cmd.Long = `Add visualization to a query. + + Creates visualization in the query. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queryvisualizations/create instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.QueryVisualizationsLegacy.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *sql.DeleteQueryVisualizationsLegacyRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq sql.DeleteQueryVisualizationsLegacyRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Remove visualization.` + cmd.Long = `Remove visualization. + + Removes a visualization from the query. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queryvisualizations/delete instead. [Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + + Arguments: + ID: Widget ID returned by :method:queryvizualisations/create` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.QueryVisualizationsLegacy.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *sql.LegacyVisualization, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.LegacyVisualization + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Edit existing visualization.` + cmd.Long = `Edit existing visualization. + + Updates visualization in the query. + + **Note**: A new version of the Databricks SQL API is now available. Please use + :method:queryvisualizations/update instead. 
[Learn more] + + [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html + + Arguments: + ID: The UUID for this visualization.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.QueryVisualizationsLegacy.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service QueryVisualizationsLegacy diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index c94d83a82..042594529 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -19,10 +19,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "query-visualizations", - Short: `This is an evolving API that facilitates the addition and removal of vizualisations from existing queries within the Databricks Workspace.`, + Short: `This is an evolving API that facilitates the addition and removal of visualizations from existing queries in the Databricks Workspace.`, Long: `This is an evolving API that facilitates the addition and removal of - vizualisations from existing queries within the Databricks Workspace. Data - structures may change over time.`, + visualizations from existing queries in the Databricks Workspace. Data + structures can change over time.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -51,24 +51,33 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *sql.CreateQueryVisualizationRequest, + *sql.CreateVisualizationRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq sql.CreateQueryVisualizationRequest + var createReq sql.CreateVisualizationRequest var createJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + // TODO: complex arg: visualization + cmd.Use = "create" - cmd.Short = `Add visualization to a query.` - cmd.Long = `Add visualization to a query.` + cmd.Short = `Add a visualization to a query.` + cmd.Long = `Add a visualization to a query. 
+ + Adds a visualization to a query.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -79,8 +88,6 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } response, err := w.QueryVisualizations.Create(ctx, createReq) @@ -108,22 +115,21 @@ func newCreate() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var deleteOverrides []func( *cobra.Command, - *sql.DeleteQueryVisualizationRequest, + *sql.DeleteVisualizationRequest, ) func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteReq sql.DeleteQueryVisualizationRequest + var deleteReq sql.DeleteVisualizationRequest // TODO: short flags cmd.Use = "delete ID" - cmd.Short = `Remove visualization.` - cmd.Long = `Remove visualization. - - Arguments: - ID: Widget ID returned by :method:queryvizualisations/create` + cmd.Short = `Remove a visualization.` + cmd.Long = `Remove a visualization. + + Removes a visualization.` cmd.Annotations = make(map[string]string) @@ -164,29 +170,44 @@ func newDelete() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var updateOverrides []func( *cobra.Command, - *sql.Visualization, + *sql.UpdateVisualizationRequest, ) func newUpdate() *cobra.Command { cmd := &cobra.Command{} - var updateReq sql.Visualization + var updateReq sql.UpdateVisualizationRequest var updateJson flags.JsonFlag // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "update ID" - cmd.Short = `Edit existing visualization.` - cmd.Long = `Edit existing visualization. + // TODO: complex arg: visualization + + cmd.Use = "update ID UPDATE_MASK" + cmd.Short = `Update a visualization.` + cmd.Long = `Update a visualization. + + Updates a visualization. Arguments: - ID: The UUID for this visualization.` + ID: + UPDATE_MASK: Field mask is required to be passed into the PATCH request. Field mask + specifies which fields of the setting payload will be updated. The field + mask needs to be supplied as single string. To specify multiple fields in + the field mask, use comma as the separator (no space).` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := root.ExactArgs(1) + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only ID as positional arguments. 
Provide 'update_mask' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) return check(cmd, args) } @@ -200,10 +221,11 @@ func newUpdate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } updateReq.Id = args[0] + if !cmd.Flags().Changed("json") { + updateReq.UpdateMask = args[1] + } response, err := w.QueryVisualizations.Update(ctx, updateReq) if err != nil { diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index c21d8a8c0..f4472cf37 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -80,6 +80,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Description about the recipient.`) cmd.Flags().StringVar(&createReq.DataRecipientGlobalMetastoreId, "data-recipient-global-metastore-id", createReq.DataRecipientGlobalMetastoreId, `The global Unity Catalog metastore id provided by the data recipient.`) + cmd.Flags().Int64Var(&createReq.ExpirationTime, "expiration-time", createReq.ExpirationTime, `Expiration timestamp of the token, in epoch milliseconds.`) // TODO: complex arg: ip_access_list cmd.Flags().StringVar(&createReq.Owner, "owner", createReq.Owner, `Username of the recipient owner.`) // TODO: complex arg: properties_kvpairs @@ -311,6 +312,8 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.DataRecipientGlobalMetastoreId, "data-recipient-global-metastore-id", listReq.DataRecipientGlobalMetastoreId, `If not provided, all recipients will be returned.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of recipients to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list" cmd.Short = `List share recipients.` @@ -449,6 +452,9 @@ func newSharePermissions() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&sharePermissionsReq.MaxResults, "max-results", sharePermissionsReq.MaxResults, `Maximum number of permissions to return.`) + cmd.Flags().StringVar(&sharePermissionsReq.PageToken, "page-token", sharePermissionsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "share-permissions NAME" cmd.Short = `Get recipient share permissions.` cmd.Long = `Get recipient share permissions. 
@@ -523,6 +529,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Description about the recipient.`) + cmd.Flags().Int64Var(&updateReq.ExpirationTime, "expiration-time", updateReq.ExpirationTime, `Expiration timestamp of the token, in epoch milliseconds.`) // TODO: complex arg: ip_access_list cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the recipient.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of the recipient owner.`) diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 08e11d686..5aa6cdf15 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -326,6 +326,7 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeAliases, "include-aliases", getReq.IncludeAliases, `Whether to include registered model aliases in the response.`) cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include registered models in the response for which the principal can only access selective metadata for.`) cmd.Use = "get FULL_NAME" diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index 710141913..3a398251f 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -147,6 +147,8 @@ func newDelete() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&deleteReq.Force, "force", deleteReq.Force, `Force deletion even if the schema is not empty.`) + cmd.Use = "delete FULL_NAME" cmd.Short = `Delete a schema.` cmd.Long = `Delete a schema. diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index c2fd779a7..67f870177 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -254,11 +254,19 @@ func newGet() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *sharing.ListSharesRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq sharing.ListSharesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of shares to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List shares.` cmd.Long = `List shares. @@ -269,11 +277,17 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response := w.Shares.List(ctx) + + response := w.Shares.List(ctx, listReq) return cmdio.RenderIterator(ctx, response) } @@ -283,7 +297,7 @@ func newList() *cobra.Command { // Apply optional overrides to this command. 
for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd @@ -305,6 +319,9 @@ func newSharePermissions() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&sharePermissionsReq.MaxResults, "max-results", sharePermissionsReq.MaxResults, `Maximum number of permissions to return.`) + cmd.Flags().StringVar(&sharePermissionsReq.PageToken, "page-token", sharePermissionsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "share-permissions NAME" cmd.Short = `Get permissions.` cmd.Long = `Get permissions. @@ -455,6 +472,8 @@ func newUpdatePermissions() *cobra.Command { cmd.Flags().Var(&updatePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: changes + cmd.Flags().IntVar(&updatePermissionsReq.MaxResults, "max-results", updatePermissionsReq.MaxResults, `Maximum number of permissions to return.`) + cmd.Flags().StringVar(&updatePermissionsReq.PageToken, "page-token", updatePermissionsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "update-permissions NAME" cmd.Short = `Update permissions.` diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index 3fe0580d7..292afbe84 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -177,6 +177,9 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of schemas to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list METASTORE_ID" cmd.Short = `List system schemas.` cmd.Long = `List system schemas. diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index b7e0614ea..4993f1aff 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -3,6 +3,8 @@ package workspace_bindings import ( + "fmt" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -35,7 +37,8 @@ func New() *cobra.Command { (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which introduces the ability to bind a securable in READ_ONLY mode (catalogs only). - Securables that support binding: - catalog`, + Securable types that support binding: - catalog - storage_credential - + external_location`, GroupID: "catalog", Annotations: map[string]string{ "package": "catalog", @@ -131,6 +134,9 @@ func newGetBindings() *cobra.Command { // TODO: short flags + cmd.Flags().IntVar(&getBindingsReq.MaxResults, "max-results", getBindingsReq.MaxResults, `Maximum number of workspace bindings to return.`) + cmd.Flags().StringVar(&getBindingsReq.PageToken, "page-token", getBindingsReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "get-bindings SECURABLE_TYPE SECURABLE_NAME" cmd.Short = `Get securable workspace bindings.` cmd.Long = `Get securable workspace bindings. @@ -139,7 +145,7 @@ func newGetBindings() *cobra.Command { or an owner of the securable. Arguments: - SECURABLE_TYPE: The type of the securable. + SECURABLE_TYPE: The type of the securable to bind to a workspace. 
SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) @@ -154,14 +160,14 @@ func newGetBindings() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getBindingsReq.SecurableType = args[0] + _, err = fmt.Sscan(args[0], &getBindingsReq.SecurableType) + if err != nil { + return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) + } getBindingsReq.SecurableName = args[1] - response, err := w.WorkspaceBindings.GetBindings(ctx, getBindingsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.WorkspaceBindings.GetBindings(ctx, getBindingsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -275,7 +281,7 @@ func newUpdateBindings() *cobra.Command { admin or an owner of the securable. Arguments: - SECURABLE_TYPE: The type of the securable. + SECURABLE_TYPE: The type of the securable to bind to a workspace. SECURABLE_NAME: The name of the securable.` cmd.Annotations = make(map[string]string) @@ -296,7 +302,10 @@ func newUpdateBindings() *cobra.Command { return err } } - updateBindingsReq.SecurableType = args[0] + _, err = fmt.Sscan(args[0], &updateBindingsReq.SecurableType) + if err != nil { + return fmt.Errorf("invalid SECURABLE_TYPE: %s", args[0]) + } updateBindingsReq.SecurableName = args[1] response, err := w.WorkspaceBindings.UpdateBindings(ctx, updateBindingsReq) diff --git a/go.mod b/go.mod index 3f5af0815..1457a4d67 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.43.2 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -60,13 +60,13 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.23.0 // indirect - golang.org/x/net v0.25.0 // indirect + golang.org/x/crypto v0.24.0 // indirect + golang.org/x/net v0.26.0 // indirect golang.org/x/sys v0.23.0 // indirect golang.org/x/time v0.5.0 // indirect google.golang.org/api v0.182.0 // indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect - google.golang.org/grpc v1.64.0 // indirect + google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index f33a9562a..b2985955c 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.43.2 h1:4B+sHAYO5kFqwZNQRmsF70eecqsFX6i/0KfXoDFQT/E= -github.com/databricks/databricks-sdk-go v0.43.2/go.mod h1:nlzeOEgJ1Tmb5HyknBJ3GEorCZKWqEBoHprvPmTSNq8= +github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s= +github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod 
h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -176,8 +176,8 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= -golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= +golang.org/x/crypto v0.24.0 h1:mnl8DM0o513X8fdIkmyFE/5hTYxbwYOjDS/+rK6qpRI= +golang.org/x/crypto v0.24.0/go.mod h1:Z1PMYSOR5nyMcyAVAIQSKCDwalqy85Aqn1x3Ws4L5DM= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= @@ -192,8 +192,8 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= -golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= +golang.org/x/net v0.26.0 h1:soB7SVo0PWrY4vPW/+ay0jKDNScG2X9wFeYlXIvJsOQ= +golang.org/x/net v0.26.0/go.mod h1:5YKkiSynbBIh3p6iOc/vibscux0x38BZDkn8sCUPxHE= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.22.0 h1:BzDx2FehcG7jJwgWLELCdmLuxk2i+x9UDpSiss2u0ZA= golang.org/x/oauth2 v0.22.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= @@ -244,8 +244,8 @@ google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyac google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= -google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= +google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA= +google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/libs/databrickscfg/cfgpickers/clusters.go b/libs/databrickscfg/cfgpickers/clusters.go index d955be35b..cac1b08a7 100644 --- a/libs/databrickscfg/cfgpickers/clusters.go +++ b/libs/databrickscfg/cfgpickers/clusters.go @@ -134,9 
+134,7 @@ func loadInteractiveClusters(ctx context.Context, w *databricks.WorkspaceClient, promptSpinner := cmdio.Spinner(ctx) promptSpinner <- "Loading list of clusters to select from" defer close(promptSpinner) - all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{ - CanUseClient: "NOTEBOOKS", - }) + all, err := w.Clusters.ListAll(ctx, compute.ListClustersRequest{}) if err != nil { return nil, fmt.Errorf("list clusters: %w", err) } diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index 2e62f93a8..d17e86d4a 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -70,7 +70,7 @@ func TestFirstCompatibleCluster(t *testing.T) { cfg, server := qa.HTTPFixtures{ { Method: "GET", - Resource: "/api/2.0/clusters/list?can_use_client=NOTEBOOKS", + Resource: "/api/2.1/clusters/list?", Response: compute.ListClustersResponse{ Clusters: []compute.ClusterDetails{ { @@ -100,7 +100,7 @@ func TestFirstCompatibleCluster(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/clusters/spark-versions", + Resource: "/api/2.1/clusters/spark-versions", Response: compute.GetSparkVersionsResponse{ Versions: []compute.SparkVersion{ { @@ -125,7 +125,7 @@ func TestNoCompatibleClusters(t *testing.T) { cfg, server := qa.HTTPFixtures{ { Method: "GET", - Resource: "/api/2.0/clusters/list?can_use_client=NOTEBOOKS", + Resource: "/api/2.1/clusters/list?", Response: compute.ListClustersResponse{ Clusters: []compute.ClusterDetails{ { @@ -147,7 +147,7 @@ func TestNoCompatibleClusters(t *testing.T) { }, { Method: "GET", - Resource: "/api/2.0/clusters/spark-versions", + Resource: "/api/2.1/clusters/spark-versions", Response: compute.GetSparkVersionsResponse{ Versions: []compute.SparkVersion{ { From 7c5b650111b176ab61bce1241d514ff641815218 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Fri, 16 Aug 2024 10:32:38 +0200 Subject: [PATCH 12/36] Fix integration tests after Go SDK bump (#1686) ## Changes These 2 tests failed `TestAccAlertsCreateErrWhenNoArguments ` -> switched to legacy command for now, new one does not have a required request body (might be an OpenAPI spec issue https://github.com/databricks/databricks-sdk-go/blob/main/service/sql/model.go#L595), will follow up later `TestAccClustersList` -> increased channel size because new clusters API returns more clusters ## Tests Tests are green now --- internal/alerts_test.go | 2 +- internal/helpers.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/alerts_test.go b/internal/alerts_test.go index f34b404de..6d7544074 100644 --- a/internal/alerts_test.go +++ b/internal/alerts_test.go @@ -9,6 +9,6 @@ import ( func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "alerts", "create") + _, _, err := RequireErrorRun(t, "alerts-legacy", "create") assert.Equal(t, "please provide command input in JSON format by specifying the --json flag", err.Error()) } diff --git a/internal/helpers.go b/internal/helpers.go index 5d9aead1f..269030183 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -87,7 +87,7 @@ type cobraTestRunner struct { } func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { - ch := make(chan string, 1000) + ch := make(chan string, 10000) wg.Add(1) go func() { defer close(ch) From f99335e87145f1b18d9b8bdd6e376fb365be2c13 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 19 Aug 2024 
12:00:21 +0200 Subject: [PATCH 13/36] Increased chan size for clusters test to pass (#1691) ## Changes Increased chan size for clusters test to pass --- internal/helpers.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/helpers.go b/internal/helpers.go index 269030183..419fa419c 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -87,7 +87,7 @@ type cobraTestRunner struct { } func consumeLines(ctx context.Context, wg *sync.WaitGroup, r io.Reader) <-chan string { - ch := make(chan string, 10000) + ch := make(chan string, 30000) wg.Add(1) go func() { defer close(ch) From beced9f1b5ec5bde8665f0a63c223829655a33f5 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 19 Aug 2024 13:27:05 +0200 Subject: [PATCH 14/36] [Release] Release v0.226.0 (#1683) CLI: * Add command line autocomplete to the fs commands ([#1622](https://github.com/databricks/cli/pull/1622)). * Add trailing slash to directory to produce completions for ([#1666](https://github.com/databricks/cli/pull/1666)). * Fix ability to import the CLI repository as module ([#1671](https://github.com/databricks/cli/pull/1671)). * Fix host resolution order in `auth login` ([#1370](https://github.com/databricks/cli/pull/1370)). * Print text logs in `import-dir` and `export-dir` commands ([#1682](https://github.com/databricks/cli/pull/1682)). Bundles: * Expand and upload local wheel libraries for all task types ([#1649](https://github.com/databricks/cli/pull/1649)). * Clarify file format required for the `config-file` flag in `bundle init` ([#1651](https://github.com/databricks/cli/pull/1651)). * Fixed incorrectly cleaning up python wheel dist folder ([#1656](https://github.com/databricks/cli/pull/1656)). * Merge job parameters based on their name ([#1659](https://github.com/databricks/cli/pull/1659)). * Fix glob expansion after running a generic build command ([#1662](https://github.com/databricks/cli/pull/1662)). * Upload local libraries even if they don't have artifact defined ([#1664](https://github.com/databricks/cli/pull/1664)). Internal: * Fix python wheel task integration tests ([#1648](https://github.com/databricks/cli/pull/1648)). * Skip pushing Terraform state after destroy ([#1667](https://github.com/databricks/cli/pull/1667)). * Enable Spark JAR task test ([#1658](https://github.com/databricks/cli/pull/1658)). * Run Spark JAR task test on multiple DBR versions ([#1665](https://github.com/databricks/cli/pull/1665)). * Stop tracking file path locations in bundle resources ([#1673](https://github.com/databricks/cli/pull/1673)). * Update VS Code settings to match latest value from IDE plugin ([#1677](https://github.com/databricks/cli/pull/1677)). * Use `service.NamedIdMap` to make lookup generation deterministic ([#1678](https://github.com/databricks/cli/pull/1678)). * [Internal] Remove dependency to the `openapi` package of the Go SDK ([#1676](https://github.com/databricks/cli/pull/1676)). * Upgrade TF provider to 1.50.0 ([#1681](https://github.com/databricks/cli/pull/1681)). * Upgrade Go SDK to 0.44.0 ([#1679](https://github.com/databricks/cli/pull/1679)). API Changes: * Changed `databricks account budgets create` command . New request type is . * Changed `databricks account budgets create` command to return . * Changed `databricks account budgets delete` command . New request type is . * Changed `databricks account budgets delete` command to return . * Changed `databricks account budgets get` command . New request type is . * Changed `databricks account budgets get` command to return . 
* Changed `databricks account budgets list` command to require request of . * Changed `databricks account budgets list` command to return . * Changed `databricks account budgets update` command . New request type is . * Changed `databricks account budgets update` command to return . * Added `databricks account usage-dashboards` command group. * Changed `databricks model-versions get` command to return . * Changed `databricks cluster-policies create` command with new required argument order. * Changed `databricks cluster-policies edit` command with new required argument order. * Added `databricks clusters update` command. * Added `databricks genie` command group. * Changed `databricks permission-migration migrate-permissions` command . New request type is . * Changed `databricks permission-migration migrate-permissions` command to return . * Changed `databricks account workspace-assignment delete` command to return . * Changed `databricks account workspace-assignment update` command with new required argument order. * Changed `databricks account custom-app-integration create` command with new required argument order. * Changed `databricks account custom-app-integration list` command to require request of . * Changed `databricks account published-app-integration list` command to require request of . * Removed `databricks apps` command group. * Added `databricks notification-destinations` command group. * Changed `databricks shares list` command to require request of . * Changed `databricks alerts create` command . New request type is . * Changed `databricks alerts delete` command . New request type is . * Changed `databricks alerts delete` command to return . * Changed `databricks alerts get` command with new required argument order. * Changed `databricks alerts list` command to require request of . * Changed `databricks alerts list` command to return . * Changed `databricks alerts update` command . New request type is . * Changed `databricks alerts update` command to return . * Changed `databricks queries create` command . New request type is . * Changed `databricks queries delete` command . New request type is . * Changed `databricks queries delete` command to return . * Changed `databricks queries get` command with new required argument order. * Changed `databricks queries list` command to return . * Removed `databricks queries restore` command. * Changed `databricks queries update` command . New request type is . * Added `databricks queries list-visualizations` command. * Changed `databricks query-visualizations create` command . New request type is . * Changed `databricks query-visualizations delete` command . New request type is . * Changed `databricks query-visualizations delete` command to return . * Changed `databricks query-visualizations update` command . New request type is . * Changed `databricks statement-execution execute-statement` command to return . * Changed `databricks statement-execution get-statement` command to return . * Added `databricks alerts-legacy` command group. * Added `databricks queries-legacy` command group. * Added `databricks query-visualizations-legacy` command group. OpenAPI commit f98c07f9c71f579de65d2587bb0292f83d10e55d (2024-08-12) Dependency updates: * Bump github.com/hashicorp/hc-install from 0.7.0 to 0.8.0 ([#1652](https://github.com/databricks/cli/pull/1652)). * Bump golang.org/x/sync from 0.7.0 to 0.8.0 ([#1655](https://github.com/databricks/cli/pull/1655)). 
* Bump golang.org/x/mod from 0.19.0 to 0.20.0 ([#1654](https://github.com/databricks/cli/pull/1654)). * Bump golang.org/x/oauth2 from 0.21.0 to 0.22.0 ([#1653](https://github.com/databricks/cli/pull/1653)). * Bump golang.org/x/text from 0.16.0 to 0.17.0 ([#1670](https://github.com/databricks/cli/pull/1670)). * Bump golang.org/x/term from 0.22.0 to 0.23.0 ([#1669](https://github.com/databricks/cli/pull/1669)). --- CHANGELOG.md | 91 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 91 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index d1e0b9a5a..39960e308 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,96 @@ # Version changelog +## [Release] Release v0.226.0 + +CLI: + * Add command line autocomplete to the fs commands ([#1622](https://github.com/databricks/cli/pull/1622)). + * Add trailing slash to directory to produce completions for ([#1666](https://github.com/databricks/cli/pull/1666)). + * Fix ability to import the CLI repository as module ([#1671](https://github.com/databricks/cli/pull/1671)). + * Fix host resolution order in `auth login` ([#1370](https://github.com/databricks/cli/pull/1370)). + * Print text logs in `import-dir` and `export-dir` commands ([#1682](https://github.com/databricks/cli/pull/1682)). + +Bundles: + * Expand and upload local wheel libraries for all task types ([#1649](https://github.com/databricks/cli/pull/1649)). + * Clarify file format required for the `config-file` flag in `bundle init` ([#1651](https://github.com/databricks/cli/pull/1651)). + * Fixed incorrectly cleaning up python wheel dist folder ([#1656](https://github.com/databricks/cli/pull/1656)). + * Merge job parameters based on their name ([#1659](https://github.com/databricks/cli/pull/1659)). + * Fix glob expansion after running a generic build command ([#1662](https://github.com/databricks/cli/pull/1662)). + * Upload local libraries even if they don't have artifact defined ([#1664](https://github.com/databricks/cli/pull/1664)). + +Internal: + * Fix python wheel task integration tests ([#1648](https://github.com/databricks/cli/pull/1648)). + * Skip pushing Terraform state after destroy ([#1667](https://github.com/databricks/cli/pull/1667)). + * Enable Spark JAR task test ([#1658](https://github.com/databricks/cli/pull/1658)). + * Run Spark JAR task test on multiple DBR versions ([#1665](https://github.com/databricks/cli/pull/1665)). + * Stop tracking file path locations in bundle resources ([#1673](https://github.com/databricks/cli/pull/1673)). + * Update VS Code settings to match latest value from IDE plugin ([#1677](https://github.com/databricks/cli/pull/1677)). + * Use `service.NamedIdMap` to make lookup generation deterministic ([#1678](https://github.com/databricks/cli/pull/1678)). + * [Internal] Remove dependency to the `openapi` package of the Go SDK ([#1676](https://github.com/databricks/cli/pull/1676)). + * Upgrade TF provider to 1.50.0 ([#1681](https://github.com/databricks/cli/pull/1681)). + * Upgrade Go SDK to 0.44.0 ([#1679](https://github.com/databricks/cli/pull/1679)). + +API Changes: + * Changed `databricks account budgets create` command . New request type is . + * Changed `databricks account budgets create` command to return . + * Changed `databricks account budgets delete` command . New request type is . + * Changed `databricks account budgets delete` command to return . + * Changed `databricks account budgets get` command . New request type is . + * Changed `databricks account budgets get` command to return . 
+ * Changed `databricks account budgets list` command to require request of . + * Changed `databricks account budgets list` command to return . + * Changed `databricks account budgets update` command . New request type is . + * Changed `databricks account budgets update` command to return . + * Added `databricks account usage-dashboards` command group. + * Changed `databricks model-versions get` command to return . + * Changed `databricks cluster-policies create` command with new required argument order. + * Changed `databricks cluster-policies edit` command with new required argument order. + * Added `databricks clusters update` command. + * Added `databricks genie` command group. + * Changed `databricks permission-migration migrate-permissions` command . New request type is . + * Changed `databricks permission-migration migrate-permissions` command to return . + * Changed `databricks account workspace-assignment delete` command to return . + * Changed `databricks account workspace-assignment update` command with new required argument order. + * Changed `databricks account custom-app-integration create` command with new required argument order. + * Changed `databricks account custom-app-integration list` command to require request of . + * Changed `databricks account published-app-integration list` command to require request of . + * Removed `databricks apps` command group. + * Added `databricks notification-destinations` command group. + * Changed `databricks shares list` command to require request of . + * Changed `databricks alerts create` command . New request type is . + * Changed `databricks alerts delete` command . New request type is . + * Changed `databricks alerts delete` command to return . + * Changed `databricks alerts get` command with new required argument order. + * Changed `databricks alerts list` command to require request of . + * Changed `databricks alerts list` command to return . + * Changed `databricks alerts update` command . New request type is . + * Changed `databricks alerts update` command to return . + * Changed `databricks queries create` command . New request type is . + * Changed `databricks queries delete` command . New request type is . + * Changed `databricks queries delete` command to return . + * Changed `databricks queries get` command with new required argument order. + * Changed `databricks queries list` command to return . + * Removed `databricks queries restore` command. + * Changed `databricks queries update` command . New request type is . + * Added `databricks queries list-visualizations` command. + * Changed `databricks query-visualizations create` command . New request type is . + * Changed `databricks query-visualizations delete` command . New request type is . + * Changed `databricks query-visualizations delete` command to return . + * Changed `databricks query-visualizations update` command . New request type is . + * Changed `databricks statement-execution execute-statement` command to return . + * Changed `databricks statement-execution get-statement` command to return . + * Added `databricks alerts-legacy` command group. + * Added `databricks queries-legacy` command group. + * Added `databricks query-visualizations-legacy` command group. + +OpenAPI commit f98c07f9c71f579de65d2587bb0292f83d10e55d (2024-08-12) +Dependency updates: + * Bump github.com/hashicorp/hc-install from 0.7.0 to 0.8.0 ([#1652](https://github.com/databricks/cli/pull/1652)). 
+ * Bump golang.org/x/sync from 0.7.0 to 0.8.0 ([#1655](https://github.com/databricks/cli/pull/1655)). + * Bump golang.org/x/mod from 0.19.0 to 0.20.0 ([#1654](https://github.com/databricks/cli/pull/1654)). + * Bump golang.org/x/oauth2 from 0.21.0 to 0.22.0 ([#1653](https://github.com/databricks/cli/pull/1653)). + * Bump golang.org/x/text from 0.16.0 to 0.17.0 ([#1670](https://github.com/databricks/cli/pull/1670)). + * Bump golang.org/x/term from 0.22.0 to 0.23.0 ([#1669](https://github.com/databricks/cli/pull/1669)). + ## 0.225.0 Bundles: From ab4e8099fb71af95b566f2d78e9d53523bdaa5c5 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Mon, 19 Aug 2024 15:24:56 +0200 Subject: [PATCH 15/36] Add `import` option for PyDABs (#1693) ## Changes Add 'import' option for PyDABs ## Tests Manually --- bundle/config/experimental.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 12048a322..66e975820 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -39,6 +39,12 @@ type PyDABs struct { // Required if PyDABs is enabled. PyDABs will load the code in the specified // environment. VEnvPath string `json:"venv_path,omitempty"` + + // Import contains a list Python packages with PyDABs code. + // + // These packages are imported to discover resources, resource generators, and mutators. + // This list can include namespace packages, which causes the import of nested packages. + Import []string `json:"import,omitempty"` } type Command string From 7de7583b37a84ee6d4a4f163ad4ba1d87207850f Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 19 Aug 2024 17:15:14 +0200 Subject: [PATCH 16/36] Make fileset take optional list of paths to list (#1684) ## Changes Before this change, the fileset library would take a single root path and list all files in it. To support an allowlist of paths to list (much like a Git `pathspec` without patterns; see [pathspec](pathspec)), this change introduces an optional argument to `fileset.New` where the caller can specify paths to list. If not specified, this argument defaults to list `.` (i.e. list all files in the root). The motivation for this change is that we wish to expose this pattern in bundles. Users should be able to specify which paths to synchronize instead of always only synchronizing the bundle root directory. [pathspec]: https://git-scm.com/docs/gitglossary#Documentation/gitglossary.txt-aiddefpathspecapathspec ## Tests New and existing unit tests. 
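For illustration, a minimal usage sketch of the new optional paths argument to `fileset.New` (the root path and subdirectory names below are hypothetical; the signatures follow the diff that comes next):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/fileset"
	"github.com/databricks/cli/libs/vfs"
)

func main() {
	root := vfs.MustNew("/path/to/bundle") // hypothetical local root

	// Default behavior: list every (non-ignored) file under the root.
	all := fileset.New(root)

	// Restrict listing to a set of paths relative to the root.
	// Directories are traversed recursively and duplicates are dropped.
	subset := fileset.New(root, []string{"src", "resources"})

	files, err := subset.Files()
	if err != nil {
		panic(err)
	}
	for _, f := range files {
		fmt.Println(f.Relative)
	}
	_ = all
}
```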
--- .../config/validate/validate_sync_patterns.go | 2 +- bundle/deploy/state_test.go | 4 +- bundle/deploy/state_update_test.go | 2 +- libs/fileset/fileset.go | 95 +++++++++--- libs/fileset/fileset_test.go | 144 ++++++++++++++++++ libs/fileset/glob_test.go | 18 ++- libs/fileset/testdata/dir1/a | 0 libs/fileset/testdata/dir1/b | 0 libs/fileset/testdata/dir2/a | 0 libs/fileset/testdata/dir2/b | 0 libs/fileset/testdata/dir3/a | 1 + libs/git/fileset.go | 10 +- libs/git/fileset_test.go | 4 +- libs/sync/snapshot_state_test.go | 2 +- libs/sync/snapshot_test.go | 28 ++-- libs/sync/sync.go | 6 +- 16 files changed, 257 insertions(+), 59 deletions(-) create mode 100644 libs/fileset/fileset_test.go create mode 100644 libs/fileset/testdata/dir1/a create mode 100644 libs/fileset/testdata/dir1/b create mode 100644 libs/fileset/testdata/dir2/a create mode 100644 libs/fileset/testdata/dir2/b create mode 120000 libs/fileset/testdata/dir3/a diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index fd011bf78..52f06835c 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -63,7 +63,7 @@ func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di return err } - all, err := fs.All() + all, err := fs.Files() if err != nil { return err } diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go index 5e1e54230..d149b0efa 100644 --- a/bundle/deploy/state_test.go +++ b/bundle/deploy/state_test.go @@ -18,7 +18,7 @@ func TestFromSlice(t *testing.T) { testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") - files, err := fileset.All() + files, err := fileset.Files() require.NoError(t, err) f, err := FromSlice(files) @@ -38,7 +38,7 @@ func TestToSlice(t *testing.T) { testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") - files, err := fileset.All() + files, err := fileset.Files() require.NoError(t, err) f, err := FromSlice(files) diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index 2982546d5..72096d142 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -23,7 +23,7 @@ func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle { testutil.Touch(t, tmpDir, "test1.py") testutil.TouchNotebook(t, tmpDir, "test2.py") - files, err := fileset.New(vfs.MustNew(tmpDir)).All() + files, err := fileset.New(vfs.MustNew(tmpDir)).Files() require.NoError(t, err) return &bundle.Bundle{ diff --git a/libs/fileset/fileset.go b/libs/fileset/fileset.go index d0f00f97a..00c6dcfa4 100644 --- a/libs/fileset/fileset.go +++ b/libs/fileset/fileset.go @@ -3,25 +3,56 @@ package fileset import ( "fmt" "io/fs" - "os" + pathlib "path" + "path/filepath" + "slices" "github.com/databricks/cli/libs/vfs" ) -// FileSet facilitates fast recursive file listing of a path. +// FileSet facilitates recursive file listing for paths rooted at a given directory. // It optionally takes into account ignore rules through the [Ignorer] interface. type FileSet struct { // Root path of the fileset. root vfs.Path + // Paths to include in the fileset. + // Files are included as-is (if not ignored) and directories are traversed recursively. + // Defaults to []string{"."} if not specified. + paths []string + // Ignorer interface to check if a file or directory should be ignored. ignore Ignorer } // New returns a [FileSet] for the given root path. 
-func New(root vfs.Path) *FileSet { +// It optionally accepts a list of paths relative to the root to include in the fileset. +// If not specified, it defaults to including all files in the root path. +func New(root vfs.Path, args ...[]string) *FileSet { + // Default to including all files in the root path. + if len(args) == 0 { + args = [][]string{{"."}} + } + + // Collect list of normalized and cleaned paths. + var paths []string + for _, arg := range args { + for _, path := range arg { + path = filepath.ToSlash(path) + path = pathlib.Clean(path) + + // Skip path if it's already in the list. + if slices.Contains(paths, path) { + continue + } + + paths = append(paths, path) + } + } + return &FileSet{ root: root, + paths: paths, ignore: nopIgnorer{}, } } @@ -36,30 +67,38 @@ func (w *FileSet) SetIgnorer(ignore Ignorer) { w.ignore = ignore } -// Return all tracked files for Repo -func (w *FileSet) All() ([]File, error) { - return w.recursiveListFiles() +// Files returns performs recursive listing on all configured paths and returns +// the collection of files it finds (and are not ignored). +// The returned slice does not contain duplicates. +// The order of files in the slice is stable. +func (w *FileSet) Files() (out []File, err error) { + seen := make(map[string]struct{}) + for _, p := range w.paths { + files, err := w.recursiveListFiles(p, seen) + if err != nil { + return nil, err + } + out = append(out, files...) + } + return out, nil } // Recursively traverses dir in a depth first manner and returns a list of all files // that are being tracked in the FileSet (ie not being ignored for matching one of the // patterns in w.ignore) -func (w *FileSet) recursiveListFiles() (fileList []File, err error) { - err = fs.WalkDir(w.root, ".", func(name string, d fs.DirEntry, err error) error { +func (w *FileSet) recursiveListFiles(path string, seen map[string]struct{}) (out []File, err error) { + err = fs.WalkDir(w.root, path, func(name string, d fs.DirEntry, err error) error { if err != nil { return err } - // skip symlinks info, err := d.Info() if err != nil { return err } - if info.Mode()&os.ModeSymlink != 0 { - return nil - } - if d.IsDir() { + switch { + case info.Mode().IsDir(): ign, err := w.ignore.IgnoreDirectory(name) if err != nil { return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) @@ -67,18 +106,28 @@ func (w *FileSet) recursiveListFiles() (fileList []File, err error) { if ign { return fs.SkipDir } - return nil + + case info.Mode().IsRegular(): + ign, err := w.ignore.IgnoreFile(name) + if err != nil { + return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) + } + if ign { + return nil + } + + // Skip duplicates + if _, ok := seen[name]; ok { + return nil + } + + seen[name] = struct{}{} + out = append(out, NewFile(w.root, d, name)) + + default: + // Skip non-regular files (e.g. symlinks). 
} - ign, err := w.ignore.IgnoreFile(name) - if err != nil { - return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) - } - if ign { - return nil - } - - fileList = append(fileList, NewFile(w.root, d, name)) return nil }) return diff --git a/libs/fileset/fileset_test.go b/libs/fileset/fileset_test.go new file mode 100644 index 000000000..be27b6b6f --- /dev/null +++ b/libs/fileset/fileset_test.go @@ -0,0 +1,144 @@ +package fileset + +import ( + "errors" + "testing" + + "github.com/databricks/cli/libs/vfs" + "github.com/stretchr/testify/assert" +) + +func TestFileSet_NoPaths(t *testing.T) { + fs := New(vfs.MustNew("testdata")) + files, err := fs.Files() + if !assert.NoError(t, err) { + return + } + + assert.Len(t, files, 4) + assert.Equal(t, "dir1/a", files[0].Relative) + assert.Equal(t, "dir1/b", files[1].Relative) + assert.Equal(t, "dir2/a", files[2].Relative) + assert.Equal(t, "dir2/b", files[3].Relative) +} + +func TestFileSet_ParentPath(t *testing.T) { + fs := New(vfs.MustNew("testdata"), []string{".."}) + _, err := fs.Files() + + // It is impossible to escape the root directory. + assert.Error(t, err) +} + +func TestFileSet_DuplicatePaths(t *testing.T) { + fs := New(vfs.MustNew("testdata"), []string{"dir1", "dir1"}) + files, err := fs.Files() + if !assert.NoError(t, err) { + return + } + + assert.Len(t, files, 2) + assert.Equal(t, "dir1/a", files[0].Relative) + assert.Equal(t, "dir1/b", files[1].Relative) +} + +func TestFileSet_OverlappingPaths(t *testing.T) { + fs := New(vfs.MustNew("testdata"), []string{"dir1", "dir1/a"}) + files, err := fs.Files() + if !assert.NoError(t, err) { + return + } + + assert.Len(t, files, 2) + assert.Equal(t, "dir1/a", files[0].Relative) + assert.Equal(t, "dir1/b", files[1].Relative) +} + +func TestFileSet_IgnoreDirError(t *testing.T) { + testError := errors.New("test error") + fs := New(vfs.MustNew("testdata")) + fs.SetIgnorer(testIgnorer{dirErr: testError}) + _, err := fs.Files() + assert.ErrorIs(t, err, testError) +} + +func TestFileSet_IgnoreDir(t *testing.T) { + fs := New(vfs.MustNew("testdata")) + fs.SetIgnorer(testIgnorer{dir: []string{"dir1"}}) + files, err := fs.Files() + if !assert.NoError(t, err) { + return + } + + assert.Len(t, files, 2) + assert.Equal(t, "dir2/a", files[0].Relative) + assert.Equal(t, "dir2/b", files[1].Relative) +} + +func TestFileSet_IgnoreFileError(t *testing.T) { + testError := errors.New("test error") + fs := New(vfs.MustNew("testdata")) + fs.SetIgnorer(testIgnorer{fileErr: testError}) + _, err := fs.Files() + assert.ErrorIs(t, err, testError) +} + +func TestFileSet_IgnoreFile(t *testing.T) { + fs := New(vfs.MustNew("testdata")) + fs.SetIgnorer(testIgnorer{file: []string{"dir1/a"}}) + files, err := fs.Files() + if !assert.NoError(t, err) { + return + } + + assert.Len(t, files, 3) + assert.Equal(t, "dir1/b", files[0].Relative) + assert.Equal(t, "dir2/a", files[1].Relative) + assert.Equal(t, "dir2/b", files[2].Relative) +} + +type testIgnorer struct { + // dir is a list of directories to ignore. Strings are compared verbatim. + dir []string + + // dirErr is an error to return when IgnoreDirectory is called. + dirErr error + + // file is a list of files to ignore. Strings are compared verbatim. + file []string + + // fileErr is an error to return when IgnoreFile is called. + fileErr error +} + +// IgnoreDirectory returns true if the path is in the dir list. +// If dirErr is set, it returns dirErr. 
+func (t testIgnorer) IgnoreDirectory(path string) (bool, error) { + if t.dirErr != nil { + return false, t.dirErr + } + + for _, d := range t.dir { + if d == path { + return true, nil + } + } + + return false, nil +} + +// IgnoreFile returns true if the path is in the file list. +// If fileErr is set, it returns fileErr. +func (t testIgnorer) IgnoreFile(path string) (bool, error) { + if t.fileErr != nil { + return false, t.fileErr + } + + for _, f := range t.file { + if f == path { + return true, nil + } + } + + return false, nil +} diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go index 8418df73a..9eb786db9 100644 --- a/libs/fileset/glob_test.go +++ b/libs/fileset/glob_test.go @@ -24,15 +24,19 @@ func TestGlobFileset(t *testing.T) { entries, err := root.ReadDir(".") require.NoError(t, err) + // Remove testdata folder from entries + entries = slices.DeleteFunc(entries, func(de fs.DirEntry) bool { + return de.Name() == "testdata" + }) + g, err := NewGlobSet(root, []string{ "./*.go", }) require.NoError(t, err) - files, err := g.All() + files, err := g.Files() require.NoError(t, err) - // +1 as there's one folder in ../filer require.Equal(t, len(files), len(entries)) for _, f := range files { exists := slices.ContainsFunc(entries, func(de fs.DirEntry) bool { @@ -46,7 +50,7 @@ func TestGlobFileset(t *testing.T) { }) require.NoError(t, err) - files, err = g.All() + files, err = g.Files() require.NoError(t, err) require.Equal(t, len(files), 0) } @@ -61,7 +65,7 @@ func TestGlobFilesetWithRelativeRoot(t *testing.T) { }) require.NoError(t, err) - files, err := g.All() + files, err := g.Files() require.NoError(t, err) require.Equal(t, len(files), len(entries)) } @@ -82,7 +86,7 @@ func TestGlobFilesetRecursively(t *testing.T) { }) require.NoError(t, err) - files, err := g.All() + files, err := g.Files() require.NoError(t, err) require.ElementsMatch(t, entries, collectRelativePaths(files)) } @@ -103,7 +107,7 @@ func TestGlobFilesetDir(t *testing.T) { }) require.NoError(t, err) - files, err := g.All() + files, err := g.Files() require.NoError(t, err) require.ElementsMatch(t, entries, collectRelativePaths(files)) } @@ -124,7 +128,7 @@ func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) { }) require.NoError(t, err) - files, err := g.All() + files, err := g.Files() require.NoError(t, err) require.ElementsMatch(t, entries, collectRelativePaths(files)) } diff --git a/libs/fileset/testdata/dir1/a b/libs/fileset/testdata/dir1/a new file mode 100644 index 000000000..e69de29bb diff --git a/libs/fileset/testdata/dir1/b b/libs/fileset/testdata/dir1/b new file mode 100644 index 000000000..e69de29bb diff --git a/libs/fileset/testdata/dir2/a b/libs/fileset/testdata/dir2/a new file mode 100644 index 000000000..e69de29bb diff --git a/libs/fileset/testdata/dir2/b b/libs/fileset/testdata/dir2/b new file mode 100644 index 000000000..e69de29bb diff --git a/libs/fileset/testdata/dir3/a b/libs/fileset/testdata/dir3/a new file mode 120000 index 000000000..5ac5651e9 --- /dev/null +++ b/libs/fileset/testdata/dir3/a @@ -0,0 +1 @@ +../dir1/a \ No newline at end of file diff --git a/libs/git/fileset.go b/libs/git/fileset.go index f1986aa20..bb1cd4692 100644 --- a/libs/git/fileset.go +++ b/libs/git/fileset.go @@ -7,15 +7,15 @@ import ( // FileSet is Git repository aware implementation of [fileset.FileSet]. // It forces checking if gitignore files have been modified every -// time a call to [FileSet.All] is made. +// time a call to [FileSet.Files] is made. 
type FileSet struct { fileset *fileset.FileSet view *View } // NewFileSet returns [FileSet] for the Git repository located at `root`. -func NewFileSet(root vfs.Path) (*FileSet, error) { - fs := fileset.New(root) +func NewFileSet(root vfs.Path, paths ...[]string) (*FileSet, error) { + fs := fileset.New(root, paths...) v, err := NewView(root) if err != nil { return nil, err @@ -35,9 +35,9 @@ func (f *FileSet) IgnoreDirectory(dir string) (bool, error) { return f.view.IgnoreDirectory(dir) } -func (f *FileSet) All() ([]fileset.File, error) { +func (f *FileSet) Files() ([]fileset.File, error) { f.view.repo.taintIgnoreRules() - return f.fileset.All() + return f.fileset.Files() } func (f *FileSet) EnsureValidGitIgnoreExists() error { diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index 4e6172bfd..37f3611d1 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -15,7 +15,7 @@ import ( func testFileSetAll(t *testing.T, root string) { fileSet, err := NewFileSet(vfs.MustNew(root)) require.NoError(t, err) - files, err := fileSet.All() + files, err := fileSet.Files() require.NoError(t, err) require.Len(t, files, 3) assert.Equal(t, path.Join("a", "b", "world.txt"), files[0].Relative) @@ -37,7 +37,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { // This should yield the same result as above test. fileSet, err := NewFileSet(vfs.MustNew("./testdata/../testdata")) require.NoError(t, err) - files, err := fileSet.All() + files, err := fileSet.Files() require.NoError(t, err) assert.Len(t, files, 3) } diff --git a/libs/sync/snapshot_state_test.go b/libs/sync/snapshot_state_test.go index 92c14e8e0..248e5832c 100644 --- a/libs/sync/snapshot_state_test.go +++ b/libs/sync/snapshot_state_test.go @@ -13,7 +13,7 @@ import ( func TestSnapshotState(t *testing.T) { fileSet := fileset.New(vfs.MustNew("./testdata/sync-fileset")) - files, err := fileSet.All() + files, err := fileSet.Files() require.NoError(t, err) // Assert initial contents of the fileset diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index 050b5d965..b7830406d 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -47,7 +47,7 @@ func TestDiff(t *testing.T) { defer f2.Close(t) // New files are put - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) @@ -62,7 +62,7 @@ func TestDiff(t *testing.T) { // world.txt is editted f2.Overwrite(t, "bunnies are cute.") assert.NoError(t, err) - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -77,7 +77,7 @@ func TestDiff(t *testing.T) { // hello.txt is deleted f1.Remove(t) assert.NoError(t, err) - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -113,7 +113,7 @@ func TestSymlinkDiff(t *testing.T) { err = os.Symlink(filepath.Join(projectDir, "foo"), filepath.Join(projectDir, "bar")) assert.NoError(t, err) - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) @@ -141,7 +141,7 @@ func TestFolderDiff(t *testing.T) { defer f1.Close(t) f1.Overwrite(t, "# Databricks notebook source\nprint(\"abc\")") - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) @@ -153,7 +153,7 @@ func TestFolderDiff(t 
*testing.T) { assert.Contains(t, change.put, "foo/bar.py") f1.Remove(t) - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -184,7 +184,7 @@ func TestPythonNotebookDiff(t *testing.T) { defer foo.Close(t) // Case 1: notebook foo.py is uploaded - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) foo.Overwrite(t, "# Databricks notebook source\nprint(\"abc\")") change, err := state.diff(ctx, files) @@ -199,7 +199,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Case 2: notebook foo.py is converted to python script by removing // magic keyword foo.Overwrite(t, "print(\"abc\")") - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -213,7 +213,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Case 3: Python script foo.py is converted to a databricks notebook foo.Overwrite(t, "# Databricks notebook source\nprint(\"def\")") - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -228,7 +228,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Case 4: Python notebook foo.py is deleted, and its remote name is used in change.delete foo.Remove(t) assert.NoError(t, err) - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) @@ -260,7 +260,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { defer pythonFoo.Close(t) vanillaFoo := testfile.CreateFile(t, filepath.Join(projectDir, "foo")) defer vanillaFoo.Close(t) - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) @@ -271,7 +271,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { // errors out because they point to the same destination pythonFoo.Overwrite(t, "# Databricks notebook source\nprint(\"def\")") - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.ErrorContains(t, err, "both foo and foo.py point to the same remote file location foo. 
Please remove one of them from your local project") @@ -296,7 +296,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { pythonFoo := testfile.CreateFile(t, filepath.Join(projectDir, "foo.py")) defer pythonFoo.Close(t) pythonFoo.Overwrite(t, "# Databricks notebook source\n") - files, err := fileSet.All() + files, err := fileSet.Files() assert.NoError(t, err) change, err := state.diff(ctx, files) assert.NoError(t, err) @@ -308,7 +308,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { sqlFoo := testfile.CreateFile(t, filepath.Join(projectDir, "foo.sql")) defer sqlFoo.Close(t) sqlFoo.Overwrite(t, "-- Databricks notebook source\n") - files, err = fileSet.All() + files, err = fileSet.Files() assert.NoError(t, err) change, err = state.diff(ctx, files) assert.NoError(t, err) diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 3d5bc61ec..ffcf3878e 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -195,14 +195,14 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { all := set.NewSetF(func(f fileset.File) string { return f.Relative }) - gitFiles, err := s.fileSet.All() + gitFiles, err := s.fileSet.Files() if err != nil { log.Errorf(ctx, "cannot list files: %s", err) return nil, err } all.Add(gitFiles...) - include, err := s.includeFileSet.All() + include, err := s.includeFileSet.Files() if err != nil { log.Errorf(ctx, "cannot list include files: %s", err) return nil, err @@ -210,7 +210,7 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { all.Add(include...) - exclude, err := s.excludeFileSet.All() + exclude, err := s.excludeFileSet.Files() if err != nil { log.Errorf(ctx, "cannot list exclude files: %s", err) return nil, err From 2b8cbc31cf03062287897b14af67aae55bd90f2a Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 19 Aug 2024 17:41:02 +0200 Subject: [PATCH 17/36] Pass through paths argument to libs/sync (#1689) ## Changes Requires #1684. ## Tests Ran the sync integration tests. 
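For reference, a rough sketch of how the sync options are assembled after this change: the local root and the paths to synchronize within it are now separate fields. All literal values below are hypothetical, and the remaining `SyncOptions` fields (workspace client, snapshot path, and so on) still need to be populated as before:

```go
package main

import (
	"context"

	"github.com/databricks/cli/libs/sync"
	"github.com/databricks/cli/libs/vfs"
)

// newSync is an illustrative helper, not an actual call site in the CLI.
func newSync(ctx context.Context) (*sync.Sync, error) {
	opts := sync.SyncOptions{
		// Root of the local file tree to consider for synchronization.
		LocalRoot: vfs.MustNew("/path/to/bundle"),
		// Paths within that root to synchronize; "." preserves the previous
		// behavior of syncing everything under the root.
		Paths:      []string{"."},
		Include:    nil,
		Exclude:    nil,
		RemotePath: "/Workspace/Users/someone@example.com/my-bundle",
	}
	return sync.New(ctx, opts)
}
```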
--- bundle/deploy/files/sync.go | 8 +++++--- cmd/sync/sync.go | 6 +++++- cmd/sync/sync_test.go | 4 ++-- libs/sync/sync.go | 14 ++++++++------ libs/sync/watchdog.go | 2 +- 5 files changed, 21 insertions(+), 13 deletions(-) diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index a308668d3..dc45053f9 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -28,10 +28,12 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp } opts := &sync.SyncOptions{ - LocalPath: rb.BundleRoot(), + LocalRoot: rb.BundleRoot(), + Paths: []string{"."}, + Include: includes, + Exclude: rb.Config().Sync.Exclude, + RemotePath: rb.Config().Workspace.FilePath, - Include: includes, - Exclude: rb.Config().Sync.Exclude, Host: rb.WorkspaceClient().Config.Host, Full: false, diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index bab451593..23a4c018f 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -47,7 +47,11 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn } opts := sync.SyncOptions{ - LocalPath: vfs.MustNew(args[0]), + LocalRoot: vfs.MustNew(args[0]), + Paths: []string{"."}, + Include: nil, + Exclude: nil, + RemotePath: args[1], Full: f.full, PollInterval: f.interval, diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 564aeae56..0d0c57385 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -33,7 +33,7 @@ func TestSyncOptionsFromBundle(t *testing.T) { f := syncFlags{} opts, err := f.syncOptionsFromBundle(New(), []string{}, b) require.NoError(t, err) - assert.Equal(t, tempDir, opts.LocalPath.Native()) + assert.Equal(t, tempDir, opts.LocalRoot.Native()) assert.Equal(t, "/Users/jane@doe.com/path", opts.RemotePath) assert.Equal(t, filepath.Join(tempDir, ".databricks", "bundle", "default"), opts.SnapshotBasePath) assert.NotNil(t, opts.WorkspaceClient) @@ -59,6 +59,6 @@ func TestSyncOptionsFromArgs(t *testing.T) { cmd.SetContext(root.SetWorkspaceClient(context.Background(), nil)) opts, err := f.syncOptionsFromArgs(cmd, []string{local, remote}) require.NoError(t, err) - assert.Equal(t, local, opts.LocalPath.Native()) + assert.Equal(t, local, opts.LocalRoot.Native()) assert.Equal(t, remote, opts.RemotePath) } diff --git a/libs/sync/sync.go b/libs/sync/sync.go index ffcf3878e..9eaebf2ad 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -16,10 +16,12 @@ import ( ) type SyncOptions struct { - LocalPath vfs.Path + LocalRoot vfs.Path + Paths []string + Include []string + Exclude []string + RemotePath string - Include []string - Exclude []string Full bool @@ -51,7 +53,7 @@ type Sync struct { // New initializes and returns a new [Sync] instance. 
func New(ctx context.Context, opts SyncOptions) (*Sync, error) { - fileSet, err := git.NewFileSet(opts.LocalPath) + fileSet, err := git.NewFileSet(opts.LocalRoot, opts.Paths) if err != nil { return nil, err } @@ -61,12 +63,12 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { return nil, err } - includeFileSet, err := fileset.NewGlobSet(opts.LocalPath, opts.Include) + includeFileSet, err := fileset.NewGlobSet(opts.LocalRoot, opts.Include) if err != nil { return nil, err } - excludeFileSet, err := fileset.NewGlobSet(opts.LocalPath, opts.Exclude) + excludeFileSet, err := fileset.NewGlobSet(opts.LocalRoot, opts.Exclude) if err != nil { return nil, err } diff --git a/libs/sync/watchdog.go b/libs/sync/watchdog.go index ca7ec46e9..cc2ca83c5 100644 --- a/libs/sync/watchdog.go +++ b/libs/sync/watchdog.go @@ -57,7 +57,7 @@ func (s *Sync) applyMkdir(ctx context.Context, localName string) error { func (s *Sync) applyPut(ctx context.Context, localName string) error { s.notifyProgress(ctx, EventActionPut, localName, 0.0) - localFile, err := s.LocalPath.Open(localName) + localFile, err := s.LocalRoot.Open(localName) if err != nil { return err } From 8238a6ad0aad46402e6a838d6272d34718c8ad92 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 19 Aug 2024 17:47:18 +0200 Subject: [PATCH 18/36] Remove reference to "dbt" in the default-sql template (#1696) ## Changes The `default-sql` template inadvertently mentioned dbt in one of the prompts. This PR removes that reference. --- .../templates/default-sql/databricks_template_schema.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json index 329f91962..113cbef64 100644 --- a/libs/template/templates/default-sql/databricks_template_schema.json +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -13,7 +13,7 @@ "type": "string", "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", - "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", "order": 2 }, "default_catalog": { From 07627023f5be85e90dd9fc27bc83a3144fcccc10 Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 19 Aug 2024 18:27:57 +0200 Subject: [PATCH 19/36] Pause continuous pipelines when 'mode: development' is used (#1590) ## Changes This makes it so that the pipelines `continuous` property is set to false by default when using `mode: development`. 
--- bundle/config/mutator/process_target_mode.go | 1 + bundle/config/mutator/process_target_mode_test.go | 3 ++- 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 9db97907d..fb71f751b 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -71,6 +71,7 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagno for i := range r.Pipelines { r.Pipelines[i].Name = prefix + r.Pipelines[i].Name r.Pipelines[i].Development = true + r.Pipelines[i].Continuous = false // (pipelines don't yet support tags) } diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index f0c8aee9e..1a2e96fab 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -82,7 +82,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, }, Pipelines: map[string]*resources.Pipeline{ - "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}}, + "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1", Continuous: true}}, }, Experiments: map[string]*resources.MlflowExperiment{ "experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}}, @@ -145,6 +145,7 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Pipeline 1 assert.Equal(t, "[dev lennart] pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) + assert.Equal(t, false, b.Config.Resources.Pipelines["pipeline1"].Continuous) assert.True(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) // Experiment 1 From 78d0ac5c6afcafe9d03c52acd043f2a0235c2afa Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Mon, 19 Aug 2024 20:18:50 +0200 Subject: [PATCH 20/36] Add configurable presets for name prefixes, tags, etc. (#1490) ## Changes This adds configurable transformations based on the transformations currently seen in `mode: development`. 
Example databricks.yml showcasing how some transformations: ``` bundle: name: my_bundle targets: dev: presets: prefix: "myprefix_" # prefix all resource names with myprefix_ pipelines_development: true # set development to true by default for pipelines trigger_pause_status: PAUSED # set pause_status to PAUSED by default for all triggers and schedules jobs_max_concurrent_runs: 10 # set max_concurrent runs to 10 by default for all jobs tags: dev: true ``` ## Tests * Existing process_target_mode tests that were adapted to use this new code * Unit tests specific for the new mutator * Unit tests for config loading and merging * Manual e2e testing --- bundle/config/mutator/apply_presets.go | 209 ++++++++++++++++++ bundle/config/mutator/apply_presets_test.go | 196 ++++++++++++++++ bundle/config/mutator/process_target_mode.go | 130 +++++------ .../mutator/process_target_mode_test.go | 150 ++++++++++++- bundle/config/presets.go | 32 +++ bundle/config/root.go | 5 + bundle/config/target.go | 4 + bundle/phases/initialize.go | 1 + bundle/tests/presets/databricks.yml | 22 ++ bundle/tests/presets_test.go | 28 +++ 10 files changed, 687 insertions(+), 90 deletions(-) create mode 100644 bundle/config/mutator/apply_presets.go create mode 100644 bundle/config/mutator/apply_presets_test.go create mode 100644 bundle/config/presets.go create mode 100644 bundle/tests/presets/databricks.yml create mode 100644 bundle/tests/presets_test.go diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go new file mode 100644 index 000000000..42e6ab95f --- /dev/null +++ b/bundle/config/mutator/apply_presets.go @@ -0,0 +1,209 @@ +package mutator + +import ( + "context" + "path" + "slices" + "sort" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/textutil" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" +) + +type applyPresets struct{} + +// Apply all presets, e.g. the prefix presets that +// adds a prefix to all names of all resources. 
+func ApplyPresets() *applyPresets { + return &applyPresets{} +} + +type Tag struct { + Key string + Value string +} + +func (m *applyPresets) Name() string { + return "ApplyPresets" +} + +func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if d := validatePauseStatus(b); d != nil { + return d + } + + r := b.Config.Resources + t := b.Config.Presets + prefix := t.NamePrefix + tags := toTagArray(t.Tags) + + // Jobs presets: Prefix, Tags, JobsMaxConcurrentRuns, TriggerPauseStatus + for _, j := range r.Jobs { + j.Name = prefix + j.Name + if j.Tags == nil { + j.Tags = make(map[string]string) + } + for _, tag := range tags { + if j.Tags[tag.Key] == "" { + j.Tags[tag.Key] = tag.Value + } + } + if j.MaxConcurrentRuns == 0 { + j.MaxConcurrentRuns = t.JobsMaxConcurrentRuns + } + if t.TriggerPauseStatus != "" { + paused := jobs.PauseStatusPaused + if t.TriggerPauseStatus == config.Unpaused { + paused = jobs.PauseStatusUnpaused + } + + if j.Schedule != nil && j.Schedule.PauseStatus == "" { + j.Schedule.PauseStatus = paused + } + if j.Continuous != nil && j.Continuous.PauseStatus == "" { + j.Continuous.PauseStatus = paused + } + if j.Trigger != nil && j.Trigger.PauseStatus == "" { + j.Trigger.PauseStatus = paused + } + } + } + + // Pipelines presets: Prefix, PipelinesDevelopment + for i := range r.Pipelines { + r.Pipelines[i].Name = prefix + r.Pipelines[i].Name + if config.IsExplicitlyEnabled(t.PipelinesDevelopment) { + r.Pipelines[i].Development = true + } + if t.TriggerPauseStatus == config.Paused { + r.Pipelines[i].Continuous = false + } + + // As of 2024-06, pipelines don't yet support tags + } + + // Models presets: Prefix, Tags + for _, m := range r.Models { + m.Name = prefix + m.Name + for _, t := range tags { + exists := slices.ContainsFunc(m.Tags, func(modelTag ml.ModelTag) bool { + return modelTag.Key == t.Key + }) + if !exists { + // Only add this tag if the resource didn't include any tag that overrides its value. + m.Tags = append(m.Tags, ml.ModelTag{Key: t.Key, Value: t.Value}) + } + } + } + + // Experiments presets: Prefix, Tags + for _, e := range r.Experiments { + filepath := e.Name + dir := path.Dir(filepath) + base := path.Base(filepath) + if dir == "." { + e.Name = prefix + base + } else { + e.Name = dir + "/" + prefix + base + } + for _, t := range tags { + exists := false + for _, experimentTag := range e.Tags { + if experimentTag.Key == t.Key { + exists = true + break + } + } + if !exists { + e.Tags = append(e.Tags, ml.ExperimentTag{Key: t.Key, Value: t.Value}) + } + } + } + + // Model serving endpoint presets: Prefix + for i := range r.ModelServingEndpoints { + r.ModelServingEndpoints[i].Name = normalizePrefix(prefix) + r.ModelServingEndpoints[i].Name + + // As of 2024-06, model serving endpoints don't yet support tags + } + + // Registered models presets: Prefix + for i := range r.RegisteredModels { + r.RegisteredModels[i].Name = normalizePrefix(prefix) + r.RegisteredModels[i].Name + + // As of 2024-06, registered models don't yet support tags + } + + // Quality monitors presets: Prefix + if t.TriggerPauseStatus == config.Paused { + for i := range r.QualityMonitors { + // Remove all schedules from monitors, since they don't support pausing/unpausing. + // Quality monitors might support the "pause" property in the future, so at the + // CLI level we do respect that property if it is set to "unpaused." 
+ if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused { + r.QualityMonitors[i].Schedule = nil + } + } + } + + // Schemas: Prefix + for i := range r.Schemas { + prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" + r.Schemas[i].Name = prefix + r.Schemas[i].Name + // HTTP API for schemas doesn't yet support tags. It's only supported in + // the Databricks UI and via the SQL API. + } + + return nil +} + +func validatePauseStatus(b *bundle.Bundle) diag.Diagnostics { + p := b.Config.Presets.TriggerPauseStatus + if p == "" || p == config.Paused || p == config.Unpaused { + return nil + } + return diag.Diagnostics{{ + Summary: "Invalid value for trigger_pause_status, should be PAUSED or UNPAUSED", + Severity: diag.Error, + Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")}, + }} +} + +// toTagArray converts a map of tags to an array of tags. +// We sort tags so ensure stable ordering. +func toTagArray(tags map[string]string) []Tag { + var tagArray []Tag + if tags == nil { + return tagArray + } + for key, value := range tags { + tagArray = append(tagArray, Tag{Key: key, Value: value}) + } + sort.Slice(tagArray, func(i, j int) bool { + return tagArray[i].Key < tagArray[j].Key + }) + return tagArray +} + +// normalizePrefix prefixes strings like '[dev lennart] ' to 'dev_lennart_'. +// We leave unicode letters and numbers but remove all "special characters." +func normalizePrefix(prefix string) string { + prefix = strings.ReplaceAll(prefix, "[", "") + prefix = strings.Trim(prefix, " ") + + // If the prefix ends with a ']', we add an underscore to the end. + // This makes sure that we get names like "dev_user_endpoint" instead of "dev_userendpoint" + suffix := "" + if strings.HasSuffix(prefix, "]") { + suffix = "_" + } + + return textutil.NormalizeString(prefix) + suffix +} diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go new file mode 100644 index 000000000..35dac1f7d --- /dev/null +++ b/bundle/config/mutator/apply_presets_test.go @@ -0,0 +1,196 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestApplyPresetsPrefix(t *testing.T) { + tests := []struct { + name string + prefix string + job *resources.Job + want string + }{ + { + name: "add prefix to job", + prefix: "prefix-", + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + }, + }, + want: "prefix-job1", + }, + { + name: "add empty prefix to job", + prefix: "", + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + }, + }, + want: "job1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": tt.job, + }, + }, + Presets: config.Presets{ + NamePrefix: tt.prefix, + }, + }, + } + + ctx := context.Background() + diag := bundle.Apply(ctx, b, mutator.ApplyPresets()) + + if diag.HasError() { + t.Fatalf("unexpected error: %v", diag) + } + + require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].Name) + }) + } +} + +func TestApplyPresetsTags(t *testing.T) { + tests := []struct { + name string 
+ tags map[string]string + job *resources.Job + want map[string]string + }{ + { + name: "add tags to job", + tags: map[string]string{"env": "dev"}, + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tags: nil, + }, + }, + want: map[string]string{"env": "dev"}, + }, + { + name: "merge tags with existing job tags", + tags: map[string]string{"env": "dev"}, + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tags: map[string]string{"team": "data"}, + }, + }, + want: map[string]string{"env": "dev", "team": "data"}, + }, + { + name: "don't override existing job tags", + tags: map[string]string{"env": "dev"}, + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tags: map[string]string{"env": "prod"}, + }, + }, + want: map[string]string{"env": "prod"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": tt.job, + }, + }, + Presets: config.Presets{ + Tags: tt.tags, + }, + }, + } + + ctx := context.Background() + diag := bundle.Apply(ctx, b, mutator.ApplyPresets()) + + if diag.HasError() { + t.Fatalf("unexpected error: %v", diag) + } + + tags := b.Config.Resources.Jobs["job1"].Tags + require.Equal(t, tt.want, tags) + }) + } +} + +func TestApplyPresetsJobsMaxConcurrentRuns(t *testing.T) { + tests := []struct { + name string + job *resources.Job + setting int + want int + }{ + { + name: "set max concurrent runs", + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + MaxConcurrentRuns: 0, + }, + }, + setting: 5, + want: 5, + }, + { + name: "do not override existing max concurrent runs", + job: &resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "job1", + MaxConcurrentRuns: 3, + }, + }, + setting: 5, + want: 3, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": tt.job, + }, + }, + Presets: config.Presets{ + JobsMaxConcurrentRuns: tt.setting, + }, + }, + } + ctx := context.Background() + diag := bundle.Apply(ctx, b, mutator.ApplyPresets()) + + if diag.HasError() { + t.Fatalf("unexpected error: %v", diag) + } + + require.Equal(t, tt.want, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns) + }) + } +} diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index fb71f751b..92ed28689 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -2,17 +2,14 @@ package mutator import ( "context" - "path" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/log" - "github.com/databricks/databricks-sdk-go/service/catalog" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/databricks/databricks-sdk-go/service/ml" ) type processTargetMode struct{} @@ -30,103 +27,75 @@ func (m *processTargetMode) Name() string { // Mark all resources as being for 'development' purposes, i.e. // changing their their name, adding tags, and (in the future) // marking them as 'hidden' in the UI. 
-func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { +func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) { if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() { log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true") disabled := false b.Config.Bundle.Deployment.Lock.Enabled = &disabled } - r := b.Config.Resources + t := &b.Config.Presets shortName := b.Config.Workspace.CurrentUser.ShortName - prefix := "[dev " + shortName + "] " - // Generate a normalized version of the short name that can be used as a tag value. - tagValue := b.Tagging.NormalizeValue(shortName) - - for i := range r.Jobs { - r.Jobs[i].Name = prefix + r.Jobs[i].Name - if r.Jobs[i].Tags == nil { - r.Jobs[i].Tags = make(map[string]string) - } - r.Jobs[i].Tags["dev"] = tagValue - if r.Jobs[i].MaxConcurrentRuns == 0 { - r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns - } - - // Pause each job. As an exception, we don't pause jobs that are explicitly - // marked as "unpaused". This allows users to override the default behavior - // of the development mode. - if r.Jobs[i].Schedule != nil && r.Jobs[i].Schedule.PauseStatus != jobs.PauseStatusUnpaused { - r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused - } - if r.Jobs[i].Continuous != nil && r.Jobs[i].Continuous.PauseStatus != jobs.PauseStatusUnpaused { - r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused - } - if r.Jobs[i].Trigger != nil && r.Jobs[i].Trigger.PauseStatus != jobs.PauseStatusUnpaused { - r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused - } + if t.NamePrefix == "" { + t.NamePrefix = "[dev " + shortName + "] " } - for i := range r.Pipelines { - r.Pipelines[i].Name = prefix + r.Pipelines[i].Name - r.Pipelines[i].Development = true - r.Pipelines[i].Continuous = false - // (pipelines don't yet support tags) + if t.Tags == nil { + t.Tags = map[string]string{} + } + _, exists := t.Tags["dev"] + if !exists { + t.Tags["dev"] = b.Tagging.NormalizeValue(shortName) } - for i := range r.Models { - r.Models[i].Name = prefix + r.Models[i].Name - r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue}) + if t.JobsMaxConcurrentRuns == 0 { + t.JobsMaxConcurrentRuns = developmentConcurrentRuns } - for i := range r.Experiments { - filepath := r.Experiments[i].Name - dir := path.Dir(filepath) - base := path.Base(filepath) - if dir == "." { - r.Experiments[i].Name = prefix + base - } else { - r.Experiments[i].Name = dir + "/" + prefix + base - } - r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: tagValue}) + if t.TriggerPauseStatus == "" { + t.TriggerPauseStatus = config.Paused } - for i := range r.ModelServingEndpoints { - prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" - r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name - // (model serving doesn't yet support tags) + if !config.IsExplicitlyDisabled(t.PipelinesDevelopment) { + enabled := true + t.PipelinesDevelopment = &enabled } - - for i := range r.RegisteredModels { - prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" - r.RegisteredModels[i].Name = prefix + r.RegisteredModels[i].Name - // (registered models in Unity Catalog don't yet support tags) - } - - for i := range r.QualityMonitors { - // Remove all schedules from monitors, since they don't support pausing/unpausing. 
- // Quality monitors might support the "pause" property in the future, so at the - // CLI level we do respect that property if it is set to "unpaused". - if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused { - r.QualityMonitors[i].Schedule = nil - } - } - - for i := range r.Schemas { - prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" - r.Schemas[i].Name = prefix + r.Schemas[i].Name - // HTTP API for schemas doesn't yet support tags. It's only supported in - // the Databricks UI and via the SQL API. - } - - return nil } func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { + p := b.Config.Presets + u := b.Config.Workspace.CurrentUser + + // Make sure presets don't set the trigger status to UNPAUSED; + // this could be surprising since most users (and tools) expect triggers + // to be paused in development. + // (Note that there still is an exceptional case where users set the trigger + // status to UNPAUSED at the level of an individual object, which was + // historically allowed.) + if p.TriggerPauseStatus == config.Unpaused { + return diag.Diagnostics{{ + Severity: diag.Error, + Summary: "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default", + Locations: []dyn.Location{b.Config.GetLocation("presets.trigger_pause_status")}, + }} + } + + // Make sure this development copy has unique names and paths to avoid conflicts if path := findNonUserPath(b); path != "" { return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path) } + if p.NamePrefix != "" && !strings.Contains(p.NamePrefix, u.ShortName) && !strings.Contains(p.NamePrefix, u.UserName) { + // Resources such as pipelines require a unique name, e.g. '[dev steve] my_pipeline'. + // For this reason we require the name prefix to contain the current username; + // it's a pitfall for users if they don't include it and later find out that + // only a single user can do development deployments.
+ return diag.Diagnostics{{ + Severity: diag.Error, + Summary: "prefix should contain the current username or ${workspace.current_user.short_name} to ensure uniqueness when using 'mode: development'", + Locations: []dyn.Location{b.Config.GetLocation("presets.name_prefix")}, + }} + } return nil } @@ -183,10 +152,11 @@ func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Di switch b.Config.Bundle.Mode { case config.Development: diags := validateDevelopmentMode(b) - if diags != nil { + if diags.HasError() { return diags } - return transformDevelopmentMode(ctx, b) + transformDevelopmentMode(ctx, b) + return diags case config.Production: isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName) return validateProductionMode(ctx, b, isPrincipal) diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index 1a2e96fab..1c8671b4c 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -51,6 +52,7 @@ func mockBundle(mode config.Mode) *bundle.Bundle { Schedule: &jobs.CronSchedule{ QuartzCronExpression: "* * * * *", }, + Tags: map[string]string{"existing": "tag"}, }, }, "job2": { @@ -129,12 +131,13 @@ func mockBundle(mode config.Mode) *bundle.Bundle { func TestProcessTargetModeDevelopment(t *testing.T) { b := mockBundle(config.Development) - m := ProcessTargetMode() + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) // Job 1 assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name) + assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["existing"], "tag") assert.Equal(t, b.Config.Resources.Jobs["job1"].Tags["dev"], "lennart") assert.Equal(t, b.Config.Resources.Jobs["job1"].Schedule.PauseStatus, jobs.PauseStatusPaused) @@ -183,7 +186,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) // Assert that tag normalization took place. @@ -197,7 +201,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) // Assert that tag normalization took place (Azure allows more characters than AWS). @@ -211,17 +216,53 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) // Assert that tag normalization took place. 
assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"]) } +func TestValidateDevelopmentMode(t *testing.T) { + // Test with a valid development mode bundle + b := mockBundle(config.Development) + diags := validateDevelopmentMode(b) + require.NoError(t, diags.Error()) + + // Test with a bundle that has a non-user path + b.Config.Workspace.RootPath = "/Shared/.bundle/x/y/state" + diags = validateDevelopmentMode(b) + require.ErrorContains(t, diags.Error(), "root_path") + + // Test with a bundle that has an unpaused trigger pause status + b = mockBundle(config.Development) + b.Config.Presets.TriggerPauseStatus = config.Unpaused + diags = validateDevelopmentMode(b) + require.ErrorContains(t, diags.Error(), "UNPAUSED") + + // Test with a bundle that has a prefix not containing the username or short name + b = mockBundle(config.Development) + b.Config.Presets.NamePrefix = "[prod]" + diags = validateDevelopmentMode(b) + require.Len(t, diags, 1) + assert.Equal(t, diag.Error, diags[0].Severity) + assert.Contains(t, diags[0].Summary, "") + + // Test with a bundle that has valid user paths + b = mockBundle(config.Development) + b.Config.Workspace.RootPath = "/Users/lennart@company.com/.bundle/x/y/state" + b.Config.Workspace.StatePath = "/Users/lennart@company.com/.bundle/x/y/state" + b.Config.Workspace.FilePath = "/Users/lennart@company.com/.bundle/x/y/files" + b.Config.Workspace.ArtifactPath = "/Users/lennart@company.com/.bundle/x/y/artifacts" + diags = validateDevelopmentMode(b) + require.NoError(t, diags.Error()) +} + func TestProcessTargetModeDefault(t *testing.T) { b := mockBundle("") - m := ProcessTargetMode() + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) @@ -307,7 +348,7 @@ func TestAllResourcesMocked(t *testing.T) { func TestAllResourcesRenamed(t *testing.T) { b := mockBundle(config.Development) - m := ProcessTargetMode() + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) diags := bundle.Apply(context.Background(), b, m) require.NoError(t, diags.Error()) @@ -337,8 +378,7 @@ func TestDisableLocking(t *testing.T) { ctx := context.Background() b := mockBundle(config.Development) - err := bundle.Apply(ctx, b, ProcessTargetMode()) - require.Nil(t, err) + transformDevelopmentMode(ctx, b) assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled()) } @@ -348,7 +388,97 @@ func TestDisableLockingDisabled(t *testing.T) { explicitlyEnabled := true b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled - err := bundle.Apply(ctx, b, ProcessTargetMode()) - require.Nil(t, err) + transformDevelopmentMode(ctx, b) assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled") } + +func TestPrefixAlreadySet(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.NamePrefix = "custom_lennart_deploy_" + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.Equal(t, "custom_lennart_deploy_job1", b.Config.Resources.Jobs["job1"].Name) +} + +func TestTagsAlreadySet(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.Tags = map[string]string{ + "custom": "tag", + "dev": "foo", + } + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) 
+ + assert.Equal(t, "tag", b.Config.Resources.Jobs["job1"].Tags["custom"]) + assert.Equal(t, "foo", b.Config.Resources.Jobs["job1"].Tags["dev"]) +} + +func TestTagsNil(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.Tags = nil + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"]) +} + +func TestTagsEmptySet(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.Tags = map[string]string{} + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.Equal(t, "lennart", b.Config.Resources.Jobs["job2"].Tags["dev"]) +} + +func TestJobsMaxConcurrentRunsAlreadySet(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.JobsMaxConcurrentRuns = 10 + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.Equal(t, 10, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns) +} + +func TestJobsMaxConcurrentRunsDisabled(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.JobsMaxConcurrentRuns = 1 + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].MaxConcurrentRuns) +} + +func TestTriggerPauseStatusWhenUnpaused(t *testing.T) { + b := mockBundle(config.Development) + b.Config.Presets.TriggerPauseStatus = config.Unpaused + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.ErrorContains(t, diags.Error(), "target with 'mode: development' cannot set trigger pause status to UNPAUSED by default") +} + +func TestPipelinesDevelopmentDisabled(t *testing.T) { + b := mockBundle(config.Development) + notEnabled := false + b.Config.Presets.PipelinesDevelopment = ¬Enabled + + m := bundle.Seq(ProcessTargetMode(), ApplyPresets()) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) +} diff --git a/bundle/config/presets.go b/bundle/config/presets.go new file mode 100644 index 000000000..61009a252 --- /dev/null +++ b/bundle/config/presets.go @@ -0,0 +1,32 @@ +package config + +const Paused = "PAUSED" +const Unpaused = "UNPAUSED" + +type Presets struct { + // NamePrefix to prepend to all resource names. + NamePrefix string `json:"name_prefix,omitempty"` + + // PipelinesDevelopment is the default value for the development field of pipelines. + PipelinesDevelopment *bool `json:"pipelines_development,omitempty"` + + // TriggerPauseStatus is the default value for the pause status of all triggers and schedules. + // Either config.Paused, config.Unpaused, or empty. + TriggerPauseStatus string `json:"trigger_pause_status,omitempty"` + + // JobsMaxConcurrentRuns is the default value for the max concurrent runs of jobs. + JobsMaxConcurrentRuns int `json:"jobs_max_concurrent_runs,omitempty"` + + // Tags to add to all resources. + Tags map[string]string `json:"tags,omitempty"` +} + +// IsExplicitlyEnabled tests whether this feature is explicitly enabled. 
+func IsExplicitlyEnabled(feature *bool) bool { + return feature != nil && *feature +} + +// IsExplicitlyDisabled tests whether this feature is explicitly disabled. +func IsExplicitlyDisabled(feature *bool) bool { + return feature != nil && !*feature +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 2c6fe1a4a..86dc33921 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -60,6 +60,10 @@ type Root struct { // RunAs section allows to define an execution identity for jobs and pipelines runs RunAs *jobs.JobRunAs `json:"run_as,omitempty"` + // Presets applies preset transformations throughout the bundle, e.g. + // adding a name prefix to deployed resources. + Presets Presets `json:"presets,omitempty"` + Experimental *Experimental `json:"experimental,omitempty"` // Permissions section allows to define permissions which will be @@ -307,6 +311,7 @@ func (r *Root) MergeTargetOverrides(name string) error { "resources", "sync", "permissions", + "presets", } { if root, err = mergeField(root, target, f); err != nil { return err diff --git a/bundle/config/target.go b/bundle/config/target.go index acc493574..a2ef4d735 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -20,6 +20,10 @@ type Target struct { // development purposes. Mode Mode `json:"mode,omitempty"` + // Mutator configurations that e.g. change the + // name prefix of deployed resources. + Presets Presets `json:"presets,omitempty"` + // Overrides the compute used for jobs and other supported assets. ComputeID string `json:"compute_id,omitempty"` diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index fac3066dc..7a1081ded 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -47,6 +47,7 @@ func Initialize() bundle.Mutator { mutator.SetRunAs(), mutator.OverrideCompute(), mutator.ProcessTargetMode(), + mutator.ApplyPresets(), mutator.DefaultQueueing(), mutator.ExpandPipelineGlobPaths(), diff --git a/bundle/tests/presets/databricks.yml b/bundle/tests/presets/databricks.yml new file mode 100644 index 000000000..d83d31801 --- /dev/null +++ b/bundle/tests/presets/databricks.yml @@ -0,0 +1,22 @@ +bundle: + name: presets + +presets: + tags: + prod: true + team: finance + pipelines_development: true + +targets: + dev: + presets: + name_prefix: "myprefix" + pipelines_development: true + trigger_pause_status: PAUSED + jobs_max_concurrent_runs: 10 + tags: + dev: true + prod: false + prod: + presets: + pipelines_development: false diff --git a/bundle/tests/presets_test.go b/bundle/tests/presets_test.go new file mode 100644 index 000000000..5fcb5d95b --- /dev/null +++ b/bundle/tests/presets_test.go @@ -0,0 +1,28 @@ +package config_tests + +import ( + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/stretchr/testify/assert" +) + +func TestPresetsDev(t *testing.T) { + b := loadTarget(t, "./presets", "dev") + + assert.Equal(t, "myprefix", b.Config.Presets.NamePrefix) + assert.Equal(t, config.Paused, b.Config.Presets.TriggerPauseStatus) + assert.Equal(t, 10, b.Config.Presets.JobsMaxConcurrentRuns) + assert.Equal(t, true, *b.Config.Presets.PipelinesDevelopment) + assert.Equal(t, "true", b.Config.Presets.Tags["dev"]) + assert.Equal(t, "finance", b.Config.Presets.Tags["team"]) + assert.Equal(t, "false", b.Config.Presets.Tags["prod"]) +} + +func TestPresetsProd(t *testing.T) { + b := loadTarget(t, "./presets", "prod") + + assert.Equal(t, false, *b.Config.Presets.PipelinesDevelopment) + assert.Equal(t, "finance", b.Config.Presets.Tags["team"]) + 
assert.Equal(t, "true", b.Config.Presets.Tags["prod"]) +} From 242d4b51edd783341304aaa86184725102337268 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Tue, 20 Aug 2024 05:52:00 +0530 Subject: [PATCH 21/36] Report all empty resources present in error diagnostic (#1685) ## Changes This PR addressed post-merge feedback from https://github.com/databricks/cli/pull/1673. ## Tests Unit tests, and manually. ``` Error: experiment undefined-experiment is not defined at resources.experiments.undefined-experiment in databricks.yml:11:26 Error: job undefined-job is not defined at resources.jobs.undefined-job in databricks.yml:6:19 Error: pipeline undefined-pipeline is not defined at resources.pipelines.undefined-pipeline in databricks.yml:14:24 Name: undefined-job Target: default Found 3 errors ``` --- .../validate/all_resources_have_values.go | 42 ++++++++++------ .../environments_job_and_pipeline_test.go | 9 ---- bundle/tests/undefined_job/databricks.yml | 8 --- bundle/tests/undefined_job_test.go | 22 -------- .../tests/undefined_pipeline/databricks.yml | 8 --- .../tests/undefined_resources/databricks.yml | 14 ++++++ bundle/tests/undefined_resources_test.go | 50 +++++++++++++++++++ 7 files changed, 90 insertions(+), 63 deletions(-) delete mode 100644 bundle/tests/undefined_job/databricks.yml delete mode 100644 bundle/tests/undefined_job_test.go delete mode 100644 bundle/tests/undefined_pipeline/databricks.yml create mode 100644 bundle/tests/undefined_resources/databricks.yml create mode 100644 bundle/tests/undefined_resources_test.go diff --git a/bundle/config/validate/all_resources_have_values.go b/bundle/config/validate/all_resources_have_values.go index 019fe48a2..7f96e529a 100644 --- a/bundle/config/validate/all_resources_have_values.go +++ b/bundle/config/validate/all_resources_have_values.go @@ -3,6 +3,7 @@ package validate import ( "context" "fmt" + "slices" "strings" "github.com/databricks/cli/bundle" @@ -21,27 +22,36 @@ func (m *allResourcesHaveValues) Name() string { } func (m *allResourcesHaveValues) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - rv := b.Config.Value().Get("resources") - - // Skip if there are no resources block defined, or the resources block is empty. - if rv.Kind() == dyn.KindInvalid || rv.Kind() == dyn.KindNil { - return nil - } + diags := diag.Diagnostics{} _, err := dyn.MapByPattern( - rv, - dyn.NewPattern(dyn.AnyKey(), dyn.AnyKey()), + b.Config.Value(), + dyn.NewPattern(dyn.Key("resources"), dyn.AnyKey(), dyn.AnyKey()), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { - if v.Kind() == dyn.KindInvalid || v.Kind() == dyn.KindNil { - // Type of the resource, stripped of the trailing 's' to make it - // singular. - rType := strings.TrimSuffix(p[0].Key(), "s") - - rName := p[1].Key() - return v, fmt.Errorf("%s %s is not defined", rType, rName) + if v.Kind() != dyn.KindNil { + return v, nil } + + // Type of the resource, stripped of the trailing 's' to make it + // singular. + rType := strings.TrimSuffix(p[1].Key(), "s") + + // Name of the resource. Eg: "foo" in "jobs.foo". + rName := p[2].Key() + + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("%s %s is not defined", rType, rName), + Locations: v.Locations(), + Paths: []dyn.Path{slices.Clone(p)}, + }) + return v, nil }, ) - return diag.FromErr(err) + if err != nil { + diags = append(diags, diag.FromErr(err)...) 
+ } + + return diags } diff --git a/bundle/tests/environments_job_and_pipeline_test.go b/bundle/tests/environments_job_and_pipeline_test.go index 0abeb487c..218d2e470 100644 --- a/bundle/tests/environments_job_and_pipeline_test.go +++ b/bundle/tests/environments_job_and_pipeline_test.go @@ -1,7 +1,6 @@ package config_tests import ( - "path/filepath" "testing" "github.com/databricks/cli/bundle/config" @@ -15,8 +14,6 @@ func TestJobAndPipelineDevelopmentWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - l := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(l.File)) assert.Equal(t, b.Config.Bundle.Mode, config.Development) assert.True(t, p.Development) require.Len(t, p.Libraries, 1) @@ -30,8 +27,6 @@ func TestJobAndPipelineStagingWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - l := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(l.File)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) @@ -44,16 +39,12 @@ func TestJobAndPipelineProductionWithEnvironment(t *testing.T) { assert.Len(t, b.Config.Resources.Pipelines, 1) p := b.Config.Resources.Pipelines["nyc_taxi_pipeline"] - pl := b.Config.GetLocation("resources.pipelines.nyc_taxi_pipeline") - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(pl.File)) assert.False(t, p.Development) require.Len(t, p.Libraries, 1) assert.Equal(t, "./dlt/nyc_taxi_loader", p.Libraries[0].Notebook.Path) assert.Equal(t, "nyc_taxi_production", p.Target) j := b.Config.Resources.Jobs["pipeline_schedule"] - jl := b.Config.GetLocation("resources.jobs.pipeline_schedule") - assert.Equal(t, "environments_job_and_pipeline/databricks.yml", filepath.ToSlash(jl.File)) assert.Equal(t, "Daily refresh of production pipeline", j.Name) require.Len(t, j.Tasks, 1) assert.NotEmpty(t, j.Tasks[0].PipelineTask.PipelineId) diff --git a/bundle/tests/undefined_job/databricks.yml b/bundle/tests/undefined_job/databricks.yml deleted file mode 100644 index 12c19f946..000000000 --- a/bundle/tests/undefined_job/databricks.yml +++ /dev/null @@ -1,8 +0,0 @@ -bundle: - name: undefined-job - -resources: - jobs: - undefined: - test: - name: "Test Job" diff --git a/bundle/tests/undefined_job_test.go b/bundle/tests/undefined_job_test.go deleted file mode 100644 index 4596f2069..000000000 --- a/bundle/tests/undefined_job_test.go +++ /dev/null @@ -1,22 +0,0 @@ -package config_tests - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/validate" - "github.com/stretchr/testify/assert" -) - -func TestUndefinedJobLoadsWithError(t *testing.T) { - b := load(t, "./undefined_job") - diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) - assert.ErrorContains(t, diags.Error(), "job undefined is not defined") -} - -func TestUndefinedPipelineLoadsWithError(t *testing.T) { - b := load(t, "./undefined_pipeline") - diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) - assert.ErrorContains(t, diags.Error(), "pipeline undefined is not defined") -} diff --git a/bundle/tests/undefined_pipeline/databricks.yml 
b/bundle/tests/undefined_pipeline/databricks.yml deleted file mode 100644 index a52fda38c..000000000 --- a/bundle/tests/undefined_pipeline/databricks.yml +++ /dev/null @@ -1,8 +0,0 @@ -bundle: - name: undefined-pipeline - -resources: - pipelines: - undefined: - test: - name: "Test Pipeline" diff --git a/bundle/tests/undefined_resources/databricks.yml b/bundle/tests/undefined_resources/databricks.yml new file mode 100644 index 000000000..ffc0e46da --- /dev/null +++ b/bundle/tests/undefined_resources/databricks.yml @@ -0,0 +1,14 @@ +bundle: + name: undefined-job + +resources: + jobs: + undefined-job: + test: + name: "Test Job" + + experiments: + undefined-experiment: + + pipelines: + undefined-pipeline: diff --git a/bundle/tests/undefined_resources_test.go b/bundle/tests/undefined_resources_test.go new file mode 100644 index 000000000..3dbacbc25 --- /dev/null +++ b/bundle/tests/undefined_resources_test.go @@ -0,0 +1,50 @@ +package config_tests + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" +) + +func TestUndefinedResourcesLoadWithError(t *testing.T) { + b := load(t, "./undefined_resources") + diags := bundle.Apply(context.Background(), b, validate.AllResourcesHaveValues()) + + assert.Len(t, diags, 3) + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "job undefined-job is not defined", + Locations: []dyn.Location{{ + File: filepath.FromSlash("undefined_resources/databricks.yml"), + Line: 6, + Column: 19, + }}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.jobs.undefined-job")}, + }) + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "experiment undefined-experiment is not defined", + Locations: []dyn.Location{{ + File: filepath.FromSlash("undefined_resources/databricks.yml"), + Line: 11, + Column: 26, + }}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.experiments.undefined-experiment")}, + }) + assert.Contains(t, diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: "pipeline undefined-pipeline is not defined", + Locations: []dyn.Location{{ + File: filepath.FromSlash("undefined_resources/databricks.yml"), + Line: 14, + Column: 24, + }}, + Paths: []dyn.Path{dyn.MustPathFromString("resources.pipelines.undefined-pipeline")}, + }) +} From 6771ba09a699b3890316cf8f849b3a51733750e4 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 20 Aug 2024 11:33:03 +0200 Subject: [PATCH 22/36] Correctly mark package names with versions as remote libraries (#1697) ## Changes Fixes https://github.com/databricks/setup-cli/issues/124 ## Tests Added regression test --- bundle/libraries/local_path.go | 5 +++++ bundle/libraries/local_path_test.go | 1 + 2 files changed, 6 insertions(+) diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index 5b5ec6c07..3e32adfde 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -66,6 +66,11 @@ func IsLibraryLocal(dep string) bool { } func isPackage(name string) bool { + // If the dependency has ==, it's a package with version + if strings.Contains(name, "==") { + return true + } + // If the dependency has no extension, it's a PyPi package name return path.Ext(name) == "" } diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index be4028d52..7299cdc93 100644 --- 
a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -54,6 +54,7 @@ func TestIsLibraryLocal(t *testing.T) { {path: "-r /Workspace/my_project/requirements.txt", expected: false}, {path: "s3://mybucket/path/to/package", expected: false}, {path: "dbfs:/mnt/path/to/package", expected: false}, + {path: "beautifulsoup4==4.12.3", expected: false}, } for i, tc := range testCases { From af5048e73efab56dd2a13a02132e78d3ee84c5e7 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 20 Aug 2024 14:54:56 +0200 Subject: [PATCH 23/36] Share test initializer in common helper function (#1695) ## Changes These tests inadvertently re-ran mutators, the first time through `loadTarget` and the second time by running `phases.Initialize()` themselves. Some of the mutators that are executed in `phases.Initialize()` are also run as part of `loadTarget`. This is overdue a refactor to make it unambiguous what runs when. Until then, this removes the duplicated execution. ## Tests Unit tests pass. --- bundle/tests/loader.go | 29 +++++++++++++++ bundle/tests/pipeline_glob_paths_test.go | 37 +------------------ .../tests/relative_path_translation_test.go | 29 +-------------- 3 files changed, 33 insertions(+), 62 deletions(-) diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 069f09358..848132a13 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -8,6 +8,10 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -43,3 +47,28 @@ func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) { )) return b, diags } + +func configureMock(t *testing.T, b *bundle.Bundle) { + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) +} + +func initializeTarget(t *testing.T, path, env string) (*bundle.Bundle, diag.Diagnostics) { + b := load(t, path) + configureMock(t, b) + + ctx := context.Background() + diags := bundle.Apply(ctx, b, bundle.Seq( + mutator.SelectTarget(env), + phases.Initialize(), + )) + + return b, diags +} diff --git a/bundle/tests/pipeline_glob_paths_test.go b/bundle/tests/pipeline_glob_paths_test.go index bf5039b5f..c1c62cfb6 100644 --- a/bundle/tests/pipeline_glob_paths_test.go +++ b/bundle/tests/pipeline_glob_paths_test.go @@ -1,33 +1,13 @@ package config_tests import ( - "context" "testing" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/phases" - "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" - "github.com/databricks/databricks-sdk-go/service/iam" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) func TestExpandPipelineGlobPaths(t *testing.T) { - b := loadTarget(t, "./pipeline_glob_paths", "default") - - // Configure mock workspace client - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &config.Config{ - Host: "https://mock.databricks.workspace.com", - } - 
m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ - UserName: "user@domain.com", - }, nil) - b.SetWorkpaceClient(m.WorkspaceClient) - - ctx := context.Background() - diags := bundle.Apply(ctx, b, phases.Initialize()) + b, diags := initializeTarget(t, "./pipeline_glob_paths", "default") require.NoError(t, diags.Error()) require.Equal( t, @@ -37,19 +17,6 @@ func TestExpandPipelineGlobPaths(t *testing.T) { } func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { - b := loadTarget(t, "./pipeline_glob_paths", "error") - - // Configure mock workspace client - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &config.Config{ - Host: "https://mock.databricks.workspace.com", - } - m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ - UserName: "user@domain.com", - }, nil) - b.SetWorkpaceClient(m.WorkspaceClient) - - ctx := context.Background() - diags := bundle.Apply(ctx, b, phases.Initialize()) + _, diags := initializeTarget(t, "./pipeline_glob_paths", "error") require.ErrorContains(t, diags.Error(), "notebook ./non-existent not found") } diff --git a/bundle/tests/relative_path_translation_test.go b/bundle/tests/relative_path_translation_test.go index d5b80bea5..199871d23 100644 --- a/bundle/tests/relative_path_translation_test.go +++ b/bundle/tests/relative_path_translation_test.go @@ -1,36 +1,14 @@ package config_tests import ( - "context" "testing" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/phases" - "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" - "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func configureMock(t *testing.T, b *bundle.Bundle) { - // Configure mock workspace client - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &config.Config{ - Host: "https://mock.databricks.workspace.com", - } - m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ - UserName: "user@domain.com", - }, nil) - b.SetWorkpaceClient(m.WorkspaceClient) -} - func TestRelativePathTranslationDefault(t *testing.T) { - b := loadTarget(t, "./relative_path_translation", "default") - configureMock(t, b) - - diags := bundle.Apply(context.Background(), b, phases.Initialize()) + b, diags := initializeTarget(t, "./relative_path_translation", "default") require.NoError(t, diags.Error()) t0 := b.Config.Resources.Jobs["job"].Tasks[0] @@ -40,10 +18,7 @@ func TestRelativePathTranslationDefault(t *testing.T) { } func TestRelativePathTranslationOverride(t *testing.T) { - b := loadTarget(t, "./relative_path_translation", "override") - configureMock(t, b) - - diags := bundle.Apply(context.Background(), b, phases.Initialize()) + b, diags := initializeTarget(t, "./relative_path_translation", "override") require.NoError(t, diags.Error()) t0 := b.Config.Resources.Jobs["job"].Tasks[0] From 44902fa3501033928a5ec46dbfcf4cb23f739788 Mon Sep 17 00:00:00 2001 From: Gleb Kanterov Date: Tue, 20 Aug 2024 15:26:57 +0200 Subject: [PATCH 24/36] Make `pydabs/venv_path` optional (#1687) ## Changes Make `pydabs/venv_path` optional. When not specified, CLI detects the Python interpreter using `python.DetectExecutable`, the same way as for `artifacts`. `python.DetectExecutable` works correctly if a virtual environment is activated or `python3` is available on PATH through other means. 
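For illustration, a minimal sketch of a bundle that enables PyDABs without a virtual environment path (other `pydabs` settings, such as the packages to import, are omitted for brevity): ``` experimental: pydabs: enabled: true # venv_path is now optional; when omitted, the CLI falls back to the # python3 interpreter found on PATH (or the active virtual environment). # venv_path: .venv ``` When `venv_path` is set, behavior is unchanged and the interpreter inside that virtual environment is used.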
Extract the venv detection code from PyDABs into `libs/python/detect`. This code will be used when we implement the `python/venv_path` section in `databricks.yml`. ## Tests Unit tests and manually --------- Co-authored-by: Pieter Noordhuis --- bundle/artifacts/whl/infer.go | 2 + bundle/config/experimental.go | 4 +- .../config/mutator/python/python_mutator.go | 33 ++++++------- .../mutator/python/python_mutator_test.go | 21 +++++++-- libs/python/detect.go | 46 +++++++++++++++++++ libs/python/detect_test.go | 46 +++++++++++++++++++ 6 files changed, 128 insertions(+), 24 deletions(-) create mode 100644 libs/python/detect_test.go diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index dd4ad2956..cb727de0e 100644 --- a/bundle/artifacts/whl/infer.go +++ b/bundle/artifacts/whl/infer.go @@ -15,6 +15,8 @@ type infer struct { func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact := b.Config.Artifacts[m.name] + + // TODO use python.DetectVEnvExecutable once bundle has a way to specify venv path py, err := python.DetectExecutable(ctx) if err != nil { return diag.FromErr(err) diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 66e975820..061bbdae0 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -36,8 +36,8 @@ type PyDABs struct { // VEnvPath is path to the virtual environment. // - // Required if PyDABs is enabled. PyDABs will load the code in the specified - // environment. + // If enabled, PyDABs will execute code within this environment. If disabled, + // it defaults to using the Python interpreter available in the current shell. VEnvPath string `json:"venv_path,omitempty"` // Import contains a list Python packages with PyDABs code. diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go index f9febe5b5..4f44df0a9 100644 --- a/bundle/config/mutator/python/python_mutator.go +++ b/bundle/config/mutator/python/python_mutator.go @@ -7,8 +7,8 @@ import ( "fmt" "os" "path/filepath" - "runtime" + "github.com/databricks/cli/libs/python" "github.com/databricks/databricks-sdk-go/logger" "github.com/databricks/cli/bundle/env" @@ -86,23 +86,15 @@ func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno return nil } - if experimental.PyDABs.VEnvPath == "" { - return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set") - } - // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' var mutateDiags diag.Diagnostics var mutateDiagsHasError = errors.New("unexpected error") err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { - pythonPath := interpreterPath(experimental.PyDABs.VEnvPath) + pythonPath, err := detectExecutable(ctx, experimental.PyDABs.VEnvPath) - if _, err := os.Stat(pythonPath); err != nil { - if os.IsNotExist(err) { - return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath) - } else { - return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err) - } + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to get Python interpreter path: %w", err) } cacheDir, err := createCacheDir(ctx) @@ -423,11 +415,16 @@ func isOmitemptyDelete(left dyn.Value) bool { } } -// interpreterPath returns platform-specific path to Python interpreter in the virtual environment. 
-func interpreterPath(venvPath string) string { - if runtime.GOOS == "windows" { - return filepath.Join(venvPath, "Scripts", "python3.exe") - } else { - return filepath.Join(venvPath, "bin", "python3") +// detectExecutable lookups Python interpreter in virtual environment, or if not set, in PATH. +func detectExecutable(ctx context.Context, venvPath string) (string, error) { + if venvPath == "" { + interpreter, err := python.DetectExecutable(ctx) + if err != nil { + return "", err + } + + return interpreter, nil } + + return python.DetectVEnvExecutable(venvPath) } diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go index fbe835f92..ea02d1ced 100644 --- a/bundle/config/mutator/python/python_mutator_test.go +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -282,7 +282,7 @@ func TestPythonMutator_venvRequired(t *testing.T) { } func TestPythonMutator_venvNotFound(t *testing.T) { - expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path")) + expectedError := fmt.Sprintf("failed to get Python interpreter path: can't find %q, check if virtualenv is created", interpreterPath("bad_path")) b := loadYaml("databricks.yml", ` experimental: @@ -596,9 +596,7 @@ func loadYaml(name string, content string) *bundle.Bundle { } } -func withFakeVEnv(t *testing.T, path string) { - interpreterPath := interpreterPath(path) - +func withFakeVEnv(t *testing.T, venvPath string) { cwd, err := os.Getwd() if err != nil { panic(err) @@ -608,6 +606,8 @@ func withFakeVEnv(t *testing.T, path string) { panic(err) } + interpreterPath := interpreterPath(venvPath) + err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) if err != nil { panic(err) @@ -618,9 +618,22 @@ func withFakeVEnv(t *testing.T, path string) { panic(err) } + err = os.WriteFile(filepath.Join(venvPath, "pyvenv.cfg"), []byte(""), 0755) + if err != nil { + panic(err) + } + t.Cleanup(func() { if err := os.Chdir(cwd); err != nil { panic(err) } }) } + +func interpreterPath(venvPath string) string { + if runtime.GOOS == "windows" { + return filepath.Join(venvPath, "Scripts", "python3.exe") + } else { + return filepath.Join(venvPath, "bin", "python3") + } +} diff --git a/libs/python/detect.go b/libs/python/detect.go index b0c1475c0..8fcc7cd9c 100644 --- a/libs/python/detect.go +++ b/libs/python/detect.go @@ -3,9 +3,23 @@ package python import ( "context" "errors" + "fmt" + "io/fs" + "os" "os/exec" + "path/filepath" + "runtime" ) +// DetectExecutable looks up the path to the python3 executable from the PATH +// environment variable. +// +// If virtualenv is activated, executable from the virtualenv is returned, +// because activating virtualenv adds python3 executable on a PATH. +// +// If python3 executable is not found on the PATH, the interpreter with the +// least version that satisfies minimal 3.8 version is returned, e.g. +// python3.10. func DetectExecutable(ctx context.Context) (string, error) { // TODO: add a shortcut if .python-version file is detected somewhere in // the parent directory tree. @@ -32,3 +46,35 @@ func DetectExecutable(ctx context.Context) (string, error) { } return interpreter.Path, nil } + +// DetectVEnvExecutable returns the path to the python3 executable inside venvPath, +// that is not necessarily activated. +// +// If virtualenv is not created, or executable doesn't exist, the error is returned. 
+func DetectVEnvExecutable(venvPath string) (string, error) { + interpreterPath := filepath.Join(venvPath, "bin", "python3") + if runtime.GOOS == "windows" { + interpreterPath = filepath.Join(venvPath, "Scripts", "python3.exe") + } + + if _, err := os.Stat(interpreterPath); err != nil { + if errors.Is(err, fs.ErrNotExist) { + return "", fmt.Errorf("can't find %q, check if virtualenv is created", interpreterPath) + } else { + return "", fmt.Errorf("can't find %q: %w", interpreterPath, err) + } + } + + // pyvenv.cfg must be always present in correctly configured virtualenv, + // read more in https://snarky.ca/how-virtual-environments-work/ + pyvenvPath := filepath.Join(venvPath, "pyvenv.cfg") + if _, err := os.Stat(pyvenvPath); err != nil { + if errors.Is(err, fs.ErrNotExist) { + return "", fmt.Errorf("expected %q to be virtualenv, but pyvenv.cfg is missing", venvPath) + } else { + return "", fmt.Errorf("can't find %q: %w", pyvenvPath, err) + } + } + + return interpreterPath, nil +} diff --git a/libs/python/detect_test.go b/libs/python/detect_test.go new file mode 100644 index 000000000..78c7067f7 --- /dev/null +++ b/libs/python/detect_test.go @@ -0,0 +1,46 @@ +package python + +import ( + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestDetectVEnvExecutable(t *testing.T) { + dir := t.TempDir() + interpreterPath := interpreterPath(dir) + + err := os.Mkdir(filepath.Dir(interpreterPath), 0755) + require.NoError(t, err) + + err = os.WriteFile(interpreterPath, []byte(""), 0755) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(dir, "pyvenv.cfg"), []byte(""), 0755) + require.NoError(t, err) + + executable, err := DetectVEnvExecutable(dir) + + assert.NoError(t, err) + assert.Equal(t, interpreterPath, executable) +} + +func TestDetectVEnvExecutable_badLayout(t *testing.T) { + dir := t.TempDir() + + _, err := DetectVEnvExecutable(dir) + + assert.Errorf(t, err, "can't find %q, check if virtualenv is created", interpreterPath(dir)) +} + +func interpreterPath(venvPath string) string { + if runtime.GOOS == "windows" { + return filepath.Join(venvPath, "Scripts", "python3.exe") + } else { + return filepath.Join(venvPath, "bin", "python3") + } +} From a4c1ba3e2827abca29034115436d441310b7ee33 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 21 Aug 2024 13:15:25 +0530 Subject: [PATCH 25/36] Use API mocks for duplicate path errors in workspace files extensions client (#1690) ## Changes `TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName` recently started failing in our nightlies because the upstream `import` API was changed to [prohibit conflicting file paths](https://docs.databricks.com/en/release-notes/product/2024/august.html#files-can-no-longer-have-identical-names-in-workspace-folders). Because existing conflicting file paths can still be grandfathered in, we need to retain coverage for the test. To do this, this PR: 1. Removes the failing `TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName` 2. Add an equivalent unit test with the `list` and `get-status` API calls mocked. 
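As a rough sketch of the seam this relies on (names are taken from the diff below; the actual test uses testify's mock package rather than this hand-rolled double), the filer now depends on a narrow `apiClient` interface instead of the concrete SDK client, so a test can inject a stub that answers the `get-status` call with a canned object info:

```go
// Hand-rolled stand-in for the SDK client, for illustration only.
// It satisfies the apiClient interface introduced in this PR and copies a
// canned get-status response into the caller-provided output value.
type stubClient struct {
	info wsfsFileInfo
}

func (s *stubClient) Do(ctx context.Context, method, path string,
	headers map[string]string, request, response any,
	visitors ...func(*http.Request) error) error {
	// Every call is treated as GET /api/2.0/workspace/get-status here;
	// the mocked test below additionally asserts on the method, path,
	// and request parameters.
	*(response.(*wsfsFileInfo)) = s.info
	return nil
}
```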
--- internal/filer_test.go | 62 ------- libs/filer/workspace_files_client.go | 26 +-- .../workspace_files_extensions_client.go | 8 +- .../workspace_files_extensions_client_test.go | 151 ++++++++++++++++++ 4 files changed, 172 insertions(+), 75 deletions(-) create mode 100644 libs/filer/workspace_files_extensions_client_test.go diff --git a/internal/filer_test.go b/internal/filer_test.go index 275304256..bc4c94808 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "errors" - "fmt" "io" "io/fs" "path" @@ -722,67 +721,6 @@ func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { - t.Parallel() - - tcases := []struct { - files []struct{ name, content string } - name string - }{ - { - name: "python", - files: []struct{ name, content string }{ - {"foo.py", "print('foo')"}, - {"foo.py", "# Databricks notebook source\nprint('foo')"}, - }, - }, - { - name: "r", - files: []struct{ name, content string }{ - {"foo.r", "print('foo')"}, - {"foo.r", "# Databricks notebook source\nprint('foo')"}, - }, - }, - { - name: "sql", - files: []struct{ name, content string }{ - {"foo.sql", "SELECT 'foo'"}, - {"foo.sql", "-- Databricks notebook source\nSELECT 'foo'"}, - }, - }, - { - name: "scala", - files: []struct{ name, content string }{ - {"foo.scala", "println('foo')"}, - {"foo.scala", "// Databricks notebook source\nprintln('foo')"}, - }, - }, - // We don't need to test this for ipynb notebooks. The import API - // fails when the file extension is .ipynb but the content is not a - // valid juptyer notebook. - } - - for i := range tcases { - tc := tcases[i] - t.Run(tc.name, func(t *testing.T) { - t.Parallel() - - ctx := context.Background() - wf, tmpDir := setupWsfsExtensionsFiler(t) - - for _, f := range tc.files { - err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) - require.NoError(t, err) - } - - _, err := wf.ReadDir(ctx, ".") - assert.ErrorAs(t, err, &filer.DuplicatePathError{}) - assert.ErrorContains(t, err, fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at %s and FILE at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", path.Join(tmpDir, "foo"), path.Join(tmpDir, tc.files[0].name), tc.files[0].name)) - }) - } - -} - func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { t.Parallel() diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index e911f4409..d8ab5a6bb 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -102,13 +102,21 @@ func (info *wsfsFileInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(info) } +// Interface for *client.DatabricksClient from the Databricks Go SDK. Abstracted +// as an interface to allow for mocking in tests. +type apiClient interface { + Do(ctx context.Context, method, path string, + headers map[string]string, request, response any, + visitors ...func(*http.Request) error) error +} + // WorkspaceFilesClient implements the files-in-workspace API. // NOTE: This API is available for files under /Repos if a workspace has files-in-repos enabled. // It can access any workspace path if files-in-workspace is enabled. 
-type WorkspaceFilesClient struct { +type workspaceFilesClient struct { workspaceClient *databricks.WorkspaceClient - apiClient *client.DatabricksClient + apiClient apiClient // File operations will be relative to this path. root WorkspaceRootPath @@ -120,7 +128,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer, return nil, err } - return &WorkspaceFilesClient{ + return &workspaceFilesClient{ workspaceClient: w, apiClient: apiClient, @@ -128,7 +136,7 @@ func NewWorkspaceFilesClient(w *databricks.WorkspaceClient, root string) (Filer, }, nil } -func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { +func (w *workspaceFilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { absPath, err := w.root.Join(name) if err != nil { return err @@ -198,7 +206,7 @@ func (w *WorkspaceFilesClient) Write(ctx context.Context, name string, reader io return err } -func (w *WorkspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { +func (w *workspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err @@ -222,7 +230,7 @@ func (w *WorkspaceFilesClient) Read(ctx context.Context, name string) (io.ReadCl return w.workspaceClient.Workspace.Download(ctx, absPath) } -func (w *WorkspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { +func (w *workspaceFilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { absPath, err := w.root.Join(name) if err != nil { return err @@ -266,7 +274,7 @@ func (w *WorkspaceFilesClient) Delete(ctx context.Context, name string, mode ... return err } -func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { +func (w *workspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err @@ -299,7 +307,7 @@ func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D return wsfsDirEntriesFromObjectInfos(objects), nil } -func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error { +func (w *workspaceFilesClient) Mkdir(ctx context.Context, name string) error { dirPath, err := w.root.Join(name) if err != nil { return err @@ -309,7 +317,7 @@ func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error { }) } -func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { +func (w *workspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { absPath, err := w.root.Join(name) if err != nil { return nil, err diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index 844e736b5..b24ecf7ee 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -133,14 +133,14 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con }, nil } -type DuplicatePathError struct { +type duplicatePathError struct { oi1 workspace.ObjectInfo oi2 workspace.ObjectInfo commonName string } -func (e DuplicatePathError) Error() string { +func (e duplicatePathError) Error() string { return fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both %s at %s and %s at %s resolve to the same name %s. 
Changing the name of one of these objects will resolve this issue", e.oi1.ObjectType, e.oi1.Path, e.oi2.ObjectType, e.oi2.Path, e.commonName) } @@ -157,7 +157,7 @@ func (e ReadOnlyError) Error() string { // delete, and stat notebooks (and files in general) in the workspace, using their paths // with the extension included. // -// The ReadDir method returns a DuplicatePathError if this traditional file system view is +// The ReadDir method returns a duplicatePathError if this traditional file system view is // not possible. For example, a Python notebook called foo and a Python file called `foo.py` // would resolve to the same path `foo.py` in a tradition file system. // @@ -220,7 +220,7 @@ func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name strin // Error if we have seen this path before in the current directory. // If not seen before, add it to the seen paths. if _, ok := seenPaths[entries[i].Name()]; ok { - return nil, DuplicatePathError{ + return nil, duplicatePathError{ oi1: seenPaths[entries[i].Name()], oi2: sysInfo, commonName: path.Join(name, entries[i].Name()), diff --git a/libs/filer/workspace_files_extensions_client_test.go b/libs/filer/workspace_files_extensions_client_test.go new file mode 100644 index 000000000..321c43712 --- /dev/null +++ b/libs/filer/workspace_files_extensions_client_test.go @@ -0,0 +1,151 @@ +package filer + +import ( + "context" + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" +) + +// Mocks client.DatabricksClient from the databricks-sdk-go package. +type mockApiClient struct { + mock.Mock +} + +func (m *mockApiClient) Do(ctx context.Context, method, path string, + headers map[string]string, request any, response any, + visitors ...func(*http.Request) error) error { + args := m.Called(ctx, method, path, headers, request, response, visitors) + + // Set the http response from a value provided in the mock call. + p := response.(*wsfsFileInfo) + *p = args.Get(1).(wsfsFileInfo) + return args.Error(0) +} + +func TestFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { + for _, tc := range []struct { + name string + language workspace.Language + notebookExportFormat workspace.ExportFormat + notebookPath string + filePath string + expectedError string + }{ + { + name: "python source notebook and file", + language: workspace.LanguagePython, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.py", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.py resolve to the same name /foo.py. Changing the name of one of these objects will resolve this issue", + }, + { + name: "python jupyter notebook and file", + language: workspace.LanguagePython, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.py", + // Jupyter notebooks would correspond to foo.ipynb so an error is not expected. + expectedError: "", + }, + { + name: "scala source notebook and file", + language: workspace.LanguageScala, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.scala", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. 
Both NOTEBOOK at /dir/foo and FILE at /dir/foo.scala resolve to the same name /foo.scala. Changing the name of one of these objects will resolve this issue", + }, + { + name: "r source notebook and file", + language: workspace.LanguageR, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.r", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.r resolve to the same name /foo.r. Changing the name of one of these objects will resolve this issue", + }, + { + name: "sql source notebook and file", + language: workspace.LanguageSql, + notebookExportFormat: workspace.ExportFormatSource, + notebookPath: "/dir/foo", + filePath: "/dir/foo.sql", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.sql resolve to the same name /foo.sql. Changing the name of one of these objects will resolve this issue", + }, + { + name: "python jupyter notebook and file", + language: workspace.LanguagePython, + notebookExportFormat: workspace.ExportFormatJupyter, + notebookPath: "/dir/foo", + filePath: "/dir/foo.ipynb", + expectedError: "failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at /dir/foo and FILE at /dir/foo.ipynb resolve to the same name /foo.ipynb. Changing the name of one of these objects will resolve this issue", + }, + } { + t.Run(tc.name, func(t *testing.T) { + mockedWorkspaceClient := mocks.NewMockWorkspaceClient(t) + mockedApiClient := mockApiClient{} + + // Mock the workspace API's ListAll method. + workspaceApi := mockedWorkspaceClient.GetMockWorkspaceAPI() + workspaceApi.EXPECT().ListAll(mock.Anything, workspace.ListWorkspaceRequest{ + Path: "/dir", + }).Return([]workspace.ObjectInfo{ + { + Path: tc.filePath, + Language: tc.language, + ObjectType: workspace.ObjectTypeFile, + }, + { + Path: tc.notebookPath, + Language: tc.language, + ObjectType: workspace.ObjectTypeNotebook, + }, + }, nil) + + // Mock bespoke API calls to /api/2.0/workspace/get-status, that are + // used to figure out the right file extension for the notebook. + statNotebook := wsfsFileInfo{ + ObjectInfo: workspace.ObjectInfo{ + Path: tc.notebookPath, + Language: tc.language, + ObjectType: workspace.ObjectTypeNotebook, + }, + ReposExportFormat: tc.notebookExportFormat, + } + + mockedApiClient.On("Do", mock.Anything, http.MethodGet, "/api/2.0/workspace/get-status", map[string]string(nil), map[string]string{ + "path": tc.notebookPath, + "return_export_info": "true", + }, mock.AnythingOfType("*filer.wsfsFileInfo"), []func(*http.Request) error(nil)).Return(nil, statNotebook) + + workspaceFilesClient := workspaceFilesClient{ + workspaceClient: mockedWorkspaceClient.WorkspaceClient, + apiClient: &mockedApiClient, + root: NewWorkspaceRootPath("/dir"), + } + + workspaceFilesExtensionsClient := workspaceFilesExtensionsClient{ + workspaceClient: mockedWorkspaceClient.WorkspaceClient, + wsfs: &workspaceFilesClient, + } + + _, err := workspaceFilesExtensionsClient.ReadDir(context.Background(), "/") + + if tc.expectedError == "" { + assert.NoError(t, err) + } else { + assert.ErrorAs(t, err, &duplicatePathError{}) + assert.EqualError(t, err, tc.expectedError) + } + + // assert the mocked methods were actually called, as a sanity check. 
+ workspaceApi.AssertNumberOfCalls(t, "ListAll", 1) + mockedApiClient.AssertNumberOfCalls(t, "Do", 1) + }) + } +} From c775d251eda6fa567de95e55e4558d5b99abce39 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Aug 2024 10:22:35 +0200 Subject: [PATCH 26/36] Improves detection of PyPI package names in environment dependencies (#1699) ## Changes Improves detection of PyPi package names in environment dependencies ## Tests Added unit tests --- bundle/libraries/local_path.go | 22 ++++++++++++++++++---- bundle/libraries/local_path_test.go | 9 +++++++++ 2 files changed, 27 insertions(+), 4 deletions(-) diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index 3e32adfde..417bce10e 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -3,6 +3,7 @@ package libraries import ( "net/url" "path" + "regexp" "strings" ) @@ -65,14 +66,27 @@ func IsLibraryLocal(dep string) bool { return IsLocalPath(dep) } +// ^[a-zA-Z0-9\-_]+: Matches the package name, allowing alphanumeric characters, dashes (-), and underscores (_). +// \[.*\])?: Optionally matches any extras specified in square brackets, e.g., [security]. +// ((==|!=|<=|>=|~=|>|<)\d+(\.\d+){0,2}(\.\*)?)?: Optionally matches version specifiers, supporting various operators (==, !=, etc.) followed by a version number (e.g., 2.25.1). +// Spec for package name and version specifier: https://pip.pypa.io/en/stable/reference/requirement-specifiers/ +var packageRegex = regexp.MustCompile(`^[a-zA-Z0-9\-_]+\s?(\[.*\])?\s?((==|!=|<=|>=|~=|==|>|<)\s?\d+(\.\d+){0,2}(\.\*)?)?$`) + func isPackage(name string) bool { - // If the dependency has ==, it's a package with version - if strings.Contains(name, "==") { + if packageRegex.MatchString(name) { return true } - // If the dependency has no extension, it's a PyPi package name - return path.Ext(name) == "" + return isUrlBasedLookup(name) +} + +func isUrlBasedLookup(name string) bool { + parts := strings.Split(name, " @ ") + if len(parts) != 2 { + return false + } + + return packageRegex.MatchString(parts[0]) && isRemoteStorageScheme(parts[1]) } func isRemoteStorageScheme(path string) bool { diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index 7299cdc93..7f84b3244 100644 --- a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -54,7 +54,16 @@ func TestIsLibraryLocal(t *testing.T) { {path: "-r /Workspace/my_project/requirements.txt", expected: false}, {path: "s3://mybucket/path/to/package", expected: false}, {path: "dbfs:/mnt/path/to/package", expected: false}, + {path: "beautifulsoup4", expected: false}, {path: "beautifulsoup4==4.12.3", expected: false}, + {path: "beautifulsoup4 >= 4.12.3", expected: false}, + {path: "beautifulsoup4 < 4.12.3", expected: false}, + {path: "beautifulsoup4 ~= 4.12.3", expected: false}, + {path: "beautifulsoup4[security, tests]", expected: false}, + {path: "beautifulsoup4[security, tests] ~= 4.12.3", expected: false}, + {path: "https://github.com/pypa/pip/archive/22.0.2.zip", expected: false}, + {path: "pip @ https://github.com/pypa/pip/archive/22.0.2.zip", expected: false}, + {path: "requests [security] @ https://github.com/psf/requests/archive/refs/heads/main.zip", expected: false}, } for i, tc := range testCases { From 192f33bb13a156bebf9d7d2c2b06092d8ae9775d Mon Sep 17 00:00:00 2001 From: Witold Czaplewski Date: Wed, 21 Aug 2024 12:03:56 +0200 Subject: [PATCH 27/36] [DAB] Add support for requirements libraries in Job Tasks (#1543) ## Changes While 
experimenting with DAB I discovered that requirements libraries are being ignored. One thing worth mentioning is that `bundle validate` runs successfully, but `bundle deploy` fails. This PR only covers the second part. ## Tests Added a unit test --- bundle/config/mutator/translate_paths_jobs.go | 5 +++++ bundle/config/mutator/translate_paths_test.go | 9 +++++++++ bundle/libraries/helpers.go | 3 +++ bundle/libraries/helpers_test.go | 1 + 4 files changed, 18 insertions(+) diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index 6febf4f8f..e34eeb2f0 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -50,6 +50,11 @@ func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern t.translateNoOp, noSkipRewrite, }, + { + base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("requirements")), + t.translateFilePath, + noSkipRewrite, + }, } } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 780a540df..fd64593be 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -110,6 +110,7 @@ func TestTranslatePaths(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py")) touchEmptyFile(t, filepath.Join(dir, "my_python_file.py")) touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) + touchEmptyFile(t, filepath.Join(dir, "requirements.txt")) b := &bundle.Bundle{ RootPath: dir, @@ -140,6 +141,9 @@ func TestTranslatePaths(t *testing.T) { NotebookTask: &jobs.NotebookTask{ NotebookPath: "./my_job_notebook.py", }, + Libraries: []compute.Library{ + {Requirements: "./requirements.txt"}, + }, }, { PythonWheelTask: &jobs.PythonWheelTask{ @@ -232,6 +236,11 @@ func TestTranslatePaths(t *testing.T) { "/bundle/my_job_notebook", b.Config.Resources.Jobs["job"].Tasks[2].NotebookTask.NotebookPath, ) + assert.Equal( + t, + "/bundle/requirements.txt", + b.Config.Resources.Jobs["job"].Tasks[2].Libraries[0].Requirements, + ) assert.Equal( t, "/bundle/my_python_file.py", diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go index 89679c91a..b7e707ccf 100644 --- a/bundle/libraries/helpers.go +++ b/bundle/libraries/helpers.go @@ -12,5 +12,8 @@ func libraryPath(library *compute.Library) string { if library.Egg != "" { return library.Egg } + if library.Requirements != "" { + return library.Requirements + } return "" } diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go index adc20a246..e4bd32770 100644 --- a/bundle/libraries/helpers_test.go +++ b/bundle/libraries/helpers_test.go @@ -13,5 +13,6 @@ func TestLibraryPath(t *testing.T) { assert.Equal(t, path, libraryPath(&compute.Library{Whl: path})) assert.Equal(t, path, libraryPath(&compute.Library{Jar: path})) assert.Equal(t, path, libraryPath(&compute.Library{Egg: path})) + assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path})) assert.Equal(t, "", libraryPath(&compute.Library{})) } From f5df211320a5fad876c58737d959a0a034040c63 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Wed, 21 Aug 2024 18:23:54 +0530 Subject: [PATCH 28/36] Fix prefix preset used for UC schemas (#1704) ## Changes In https://github.com/databricks/cli/pull/1490 we regressed and started using the development mode prefix for UC schemas regardless of the mode of the bundle target. 
This PR fixes the regression and adds a regression test ## Tests Failing integration tests pass now. --- bundle/config/mutator/apply_presets.go | 3 +- bundle/config/mutator/apply_presets_test.go | 57 +++++++++++++++++++++ 2 files changed, 58 insertions(+), 2 deletions(-) diff --git a/bundle/config/mutator/apply_presets.go b/bundle/config/mutator/apply_presets.go index 42e6ab95f..28d015c10 100644 --- a/bundle/config/mutator/apply_presets.go +++ b/bundle/config/mutator/apply_presets.go @@ -155,8 +155,7 @@ func (m *applyPresets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnos // Schemas: Prefix for i := range r.Schemas { - prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_" - r.Schemas[i].Name = prefix + r.Schemas[i].Name + r.Schemas[i].Name = normalizePrefix(prefix) + r.Schemas[i].Name // HTTP API for schemas doesn't yet support tags. It's only supported in // the Databricks UI and via the SQL API. } diff --git a/bundle/config/mutator/apply_presets_test.go b/bundle/config/mutator/apply_presets_test.go index 35dac1f7d..ab2478aee 100644 --- a/bundle/config/mutator/apply_presets_test.go +++ b/bundle/config/mutator/apply_presets_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -68,6 +69,62 @@ func TestApplyPresetsPrefix(t *testing.T) { } } +func TestApplyPresetsPrefixForUcSchema(t *testing.T) { + tests := []struct { + name string + prefix string + schema *resources.Schema + want string + }{ + { + name: "add prefix to schema", + prefix: "[prefix]", + schema: &resources.Schema{ + CreateSchema: &catalog.CreateSchema{ + Name: "schema1", + }, + }, + want: "prefix_schema1", + }, + { + name: "add empty prefix to schema", + prefix: "", + schema: &resources.Schema{ + CreateSchema: &catalog.CreateSchema{ + Name: "schema1", + }, + }, + want: "schema1", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Schemas: map[string]*resources.Schema{ + "schema1": tt.schema, + }, + }, + Presets: config.Presets{ + NamePrefix: tt.prefix, + }, + }, + } + + ctx := context.Background() + diag := bundle.Apply(ctx, b, mutator.ApplyPresets()) + + if diag.HasError() { + t.Fatalf("unexpected error: %v", diag) + } + + require.Equal(t, tt.want, b.Config.Resources.Schemas["schema1"].Name) + }) + } +} + func TestApplyPresetsTags(t *testing.T) { tests := []struct { name string From 6f345293b1e5f4febcd702da8a362b15b606ebd9 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Wed, 21 Aug 2024 17:05:49 +0200 Subject: [PATCH 29/36] Added filtering flags for cluster list commands (#1703) ## Changes Fixes #1701 ## Tests ``` Usage: databricks clusters list [flags] Flags: --cluster-sources []string Filter clusters by source --cluster-states []string Filter clusters by states -h, --help help for list --is-pinned Filter clusters by pinned status --page-size int Use this field to specify the maximum number of results to be returned by the server. --page-token string Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of clusters respectively. 
--policy-id string Filter clusters by policy id ``` --- cmd/workspace/clusters/overrides.go | 68 ++++++++++++++++++++++++++++- 1 file changed, 67 insertions(+), 1 deletion(-) diff --git a/cmd/workspace/clusters/overrides.go b/cmd/workspace/clusters/overrides.go index 55976d406..6038978ae 100644 --- a/cmd/workspace/clusters/overrides.go +++ b/cmd/workspace/clusters/overrides.go @@ -1,17 +1,83 @@ package clusters import ( + "strings" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/spf13/cobra" ) -func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) { +// Below we add overrides for filter flags for cluster list command to allow for custom filtering +// Auto generating such flags is not yet supported by the CLI generator +func listOverride(listCmd *cobra.Command, listReq *compute.ListClustersRequest) { listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` {{header "ID"}} {{header "Name"}} {{header "State"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` {{range .}}{{.ClusterId | green}} {{.ClusterName | cyan}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}} {{end}}`) + + listReq.FilterBy = &compute.ListClustersFilterBy{} + listCmd.Flags().BoolVar(&listReq.FilterBy.IsPinned, "is-pinned", false, "Filter clusters by pinned status") + listCmd.Flags().StringVar(&listReq.FilterBy.PolicyId, "policy-id", "", "Filter clusters by policy id") + + sources := &clusterSources{source: &listReq.FilterBy.ClusterSources} + listCmd.Flags().Var(sources, "cluster-sources", "Filter clusters by source") + + states := &clusterStates{state: &listReq.FilterBy.ClusterStates} + listCmd.Flags().Var(states, "cluster-states", "Filter clusters by states") +} + +type clusterSources struct { + source *[]compute.ClusterSource +} + +func (c *clusterSources) String() string { + s := make([]string, len(*c.source)) + for i, source := range *c.source { + s[i] = string(source) + } + + return strings.Join(s, ",") +} + +func (c *clusterSources) Set(value string) error { + splits := strings.Split(value, ",") + for _, split := range splits { + *c.source = append(*c.source, compute.ClusterSource(split)) + } + + return nil +} + +func (c *clusterSources) Type() string { + return "[]string" +} + +type clusterStates struct { + state *[]compute.State +} + +func (c *clusterStates) String() string { + s := make([]string, len(*c.state)) + for i, source := range *c.state { + s[i] = string(source) + } + + return strings.Join(s, ",") +} + +func (c *clusterStates) Set(value string) error { + splits := strings.Split(value, ",") + for _, split := range splits { + *c.state = append(*c.state, compute.State(split)) + } + + return nil +} + +func (c *clusterStates) Type() string { + return "[]string" } func listNodeTypesOverride(listNodeTypesCmd *cobra.Command) { From 6e8cd835a3f699ffec0c04e9301e3a49fd61fc9c Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Wed, 21 Aug 2024 17:33:25 +0200 Subject: [PATCH 30/36] Add paths field to bundle sync configuration (#1694) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## Changes This field allows a user to configure paths to synchronize to the workspace. Allowed values are relative paths to files and directories anchored at the directory where the field is set. 
If one or more values traverse up the directory tree (to an ancestor of the bundle root directory), the CLI will dynamically determine the root path to use to ensure that the file tree structure remains intact. For example, given a `databricks.yml` in `my_bundle` that includes: ```yaml sync: paths: - ../common - . ``` Then upon synchronization, the workspace will look like: ``` . ├── common │ └── lib.py └── my_bundle ├── databricks.yml └── notebook.py ``` If not set behavior remains identical. ## Tests * Newly added unit tests for the mutators and under `bundle/tests`. * Manually confirmed a bundle without this configuration works the same. * Manually confirmed a bundle with this configuration works. --- bundle/bundle.go | 8 + bundle/bundle_read_only.go | 4 + bundle/config/mutator/configure_wsfs.go | 4 +- bundle/config/mutator/rewrite_sync_paths.go | 4 + .../config/mutator/rewrite_sync_paths_test.go | 16 ++ bundle/config/mutator/sync_default_path.go | 48 +++++ .../config/mutator/sync_default_path_test.go | 82 ++++++++ bundle/config/mutator/sync_infer_root.go | 120 +++++++++++ .../mutator/sync_infer_root_internal_test.go | 72 +++++++ bundle/config/mutator/sync_infer_root_test.go | 198 ++++++++++++++++++ bundle/config/mutator/trampoline.go | 2 +- bundle/config/mutator/trampoline_test.go | 8 +- bundle/config/mutator/translate_paths.go | 12 +- bundle/config/mutator/translate_paths_test.go | 60 +++--- bundle/config/sync.go | 4 + bundle/deploy/files/sync.go | 4 +- bundle/deploy/state_pull.go | 2 +- bundle/deploy/state_pull_test.go | 8 +- bundle/phases/initialize.go | 11 + bundle/python/conditional_transform_test.go | 22 +- bundle/tests/loader.go | 2 + bundle/tests/sync/paths/databricks.yml | 20 ++ .../tests/sync/paths_no_root/databricks.yml | 26 +++ .../sync/shared_code/bundle/databricks.yml | 10 + .../tests/sync/shared_code/common/library.txt | 1 + bundle/tests/sync_test.go | 65 ++++++ cmd/sync/sync_test.go | 6 +- 27 files changed, 760 insertions(+), 59 deletions(-) create mode 100644 bundle/config/mutator/sync_default_path.go create mode 100644 bundle/config/mutator/sync_default_path_test.go create mode 100644 bundle/config/mutator/sync_infer_root.go create mode 100644 bundle/config/mutator/sync_infer_root_internal_test.go create mode 100644 bundle/config/mutator/sync_infer_root_test.go create mode 100644 bundle/tests/sync/paths/databricks.yml create mode 100644 bundle/tests/sync/paths_no_root/databricks.yml create mode 100644 bundle/tests/sync/shared_code/bundle/databricks.yml create mode 100644 bundle/tests/sync/shared_code/common/library.txt diff --git a/bundle/bundle.go b/bundle/bundle.go index 032d98abc..8b5ff976d 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -39,6 +39,14 @@ type Bundle struct { // Exclusively use this field for filesystem operations. BundleRoot vfs.Path + // SyncRoot is a virtual filesystem path to the root directory of the files that are synchronized to the workspace. + // It can be an ancestor to [BundleRoot], but not a descendant; that is, [SyncRoot] must contain [BundleRoot]. + SyncRoot vfs.Path + + // SyncRootPath is the local path to the root directory of files that are synchronized to the workspace. + // It is equal to `SyncRoot.Native()` and included as dedicated field for convenient access. + SyncRootPath string + Config config.Root // Metadata about the bundle deployment. 
This is the interface Databricks services diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go index 59084f2ac..74b9d94de 100644 --- a/bundle/bundle_read_only.go +++ b/bundle/bundle_read_only.go @@ -28,6 +28,10 @@ func (r ReadOnlyBundle) BundleRoot() vfs.Path { return r.b.BundleRoot } +func (r ReadOnlyBundle) SyncRoot() vfs.Path { + return r.b.SyncRoot +} + func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { return r.b.WorkspaceClient() } diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go index c7b764f00..1d1bec582 100644 --- a/bundle/config/mutator/configure_wsfs.go +++ b/bundle/config/mutator/configure_wsfs.go @@ -24,7 +24,7 @@ func (m *configureWSFS) Name() string { } func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - root := b.BundleRoot.Native() + root := b.SyncRoot.Native() // The bundle root must be located in /Workspace/ if !strings.HasPrefix(root, "/Workspace/") { @@ -45,6 +45,6 @@ func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagno return diag.FromErr(err) } - b.BundleRoot = p + b.SyncRoot = p return nil } diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index cfdc55f36..888714abe 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -45,6 +45,10 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { + v, err = dyn.Map(v, "paths", dyn.Foreach(m.makeRelativeTo(b.RootPath))) + if err != nil { + return dyn.InvalidValue, err + } v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/rewrite_sync_paths_test.go b/bundle/config/mutator/rewrite_sync_paths_test.go index 56ada19e6..fa7f124b7 100644 --- a/bundle/config/mutator/rewrite_sync_paths_test.go +++ b/bundle/config/mutator/rewrite_sync_paths_test.go @@ -17,6 +17,10 @@ func TestRewriteSyncPathsRelative(t *testing.T) { RootPath: ".", Config: config.Root{ Sync: config.Sync{ + Paths: []string{ + ".", + "../common", + }, Include: []string{ "foo", "bar", @@ -29,6 +33,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) { }, } + bundletest.SetLocation(b, "sync.paths[0]", "./databricks.yml") + bundletest.SetLocation(b, "sync.paths[1]", "./databricks.yml") bundletest.SetLocation(b, "sync.include[0]", "./file.yml") bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml") bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml") @@ -37,6 +43,8 @@ func TestRewriteSyncPathsRelative(t *testing.T) { diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0]) + assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1]) assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) @@ -48,6 +56,10 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) { RootPath: "/tmp/dir", Config: config.Root{ Sync: config.Sync{ + Paths: []string{ + ".", + "../common", + }, Include: []string{ "foo", "bar", 
@@ -60,6 +72,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) { }, } + bundletest.SetLocation(b, "sync.paths[0]", "/tmp/dir/databricks.yml") + bundletest.SetLocation(b, "sync.paths[1]", "/tmp/dir/databricks.yml") bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml") bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml") bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml") @@ -68,6 +82,8 @@ func TestRewriteSyncPathsAbsolute(t *testing.T) { diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.Clean("."), b.Config.Sync.Paths[0]) + assert.Equal(t, filepath.Clean("../common"), b.Config.Sync.Paths[1]) assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) diff --git a/bundle/config/mutator/sync_default_path.go b/bundle/config/mutator/sync_default_path.go new file mode 100644 index 000000000..8e14ce202 --- /dev/null +++ b/bundle/config/mutator/sync_default_path.go @@ -0,0 +1,48 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type syncDefaultPath struct{} + +// SyncDefaultPath configures the default sync path to be equal to the bundle root. +func SyncDefaultPath() bundle.Mutator { + return &syncDefaultPath{} +} + +func (m *syncDefaultPath) Name() string { + return "SyncDefaultPath" +} + +func (m *syncDefaultPath) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + isset := false + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + pv, _ := dyn.Get(v, "sync.paths") + + // If the sync paths field is already set, do nothing. + // We know it is set if its value is either a nil or a sequence (empty or not). + switch pv.Kind() { + case dyn.KindNil, dyn.KindSequence: + isset = true + } + + return v, nil + }) + if err != nil { + return diag.FromErr(err) + } + + // If the sync paths field is already set, do nothing. + if isset { + return nil + } + + // Set the sync paths to the default value. 
+ b.Config.Sync.Paths = []string{"."} + return nil +} diff --git a/bundle/config/mutator/sync_default_path_test.go b/bundle/config/mutator/sync_default_path_test.go new file mode 100644 index 000000000..a37e913d2 --- /dev/null +++ b/bundle/config/mutator/sync_default_path_test.go @@ -0,0 +1,82 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSyncDefaultPath_DefaultIfUnset(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir", + Config: config.Root{}, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncDefaultPath()) + require.NoError(t, diags.Error()) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) +} + +func TestSyncDefaultPath_SkipIfSet(t *testing.T) { + tcases := []struct { + name string + paths dyn.Value + expect []string + }{ + { + name: "nil", + paths: dyn.V(nil), + expect: nil, + }, + { + name: "empty sequence", + paths: dyn.V([]dyn.Value{}), + expect: []string{}, + }, + { + name: "non-empty sequence", + paths: dyn.V([]dyn.Value{dyn.V("something")}), + expect: []string{"something"}, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.name, func(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir", + Config: config.Root{}, + } + + diags := bundle.ApplyFunc(context.Background(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + v, err := dyn.Set(v, "sync", dyn.V(dyn.NewMapping())) + if err != nil { + return dyn.InvalidValue, err + } + v, err = dyn.Set(v, "sync.paths", tcase.paths) + if err != nil { + return dyn.InvalidValue, err + } + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + ctx := context.Background() + diags = bundle.Apply(ctx, b, mutator.SyncDefaultPath()) + require.NoError(t, diags.Error()) + + // If the sync paths field is already set, do nothing. + assert.Equal(t, tcase.expect, b.Config.Sync.Paths) + }) + } +} diff --git a/bundle/config/mutator/sync_infer_root.go b/bundle/config/mutator/sync_infer_root.go new file mode 100644 index 000000000..012acf800 --- /dev/null +++ b/bundle/config/mutator/sync_infer_root.go @@ -0,0 +1,120 @@ +package mutator + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/vfs" +) + +type syncInferRoot struct{} + +// SyncInferRoot is a mutator that infers the root path of all files to synchronize by looking at the +// paths in the sync configuration. The sync root may be different from the bundle root +// when the user intends to synchronize files outside the bundle root. +// +// The sync root can be equivalent to or an ancestor of the bundle root, but not a descendant. +// That is, the sync root must contain the bundle root. +// +// This mutator requires all sync-related paths and patterns to be relative to the bundle root path. +// This is done by the [RewriteSyncPaths] mutator, which must run before this mutator. 
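+//
+// Illustrative example (mirroring the tests in this change): with a bundle
+// root of "/tmp/some/dir" and sync.paths set to ["../common"], the inferred
+// sync root is "/tmp/some"; the sync path is rewritten to "common" and any
+// include/exclude patterns are prefixed with "dir/" so they stay relative
+// to the new root.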
+func SyncInferRoot() bundle.Mutator { + return &syncInferRoot{} +} + +func (m *syncInferRoot) Name() string { + return "SyncInferRoot" +} + +// computeRoot finds the innermost path that contains the specified path. +// It traverses up the root path until it finds the innermost path. +// If the path does not exist, it returns an empty string. +// +// See "sync_infer_root_internal_test.go" for examples. +func (m *syncInferRoot) computeRoot(path string, root string) string { + for !filepath.IsLocal(path) { + // Break if we have reached the root of the filesystem. + dir := filepath.Dir(root) + if dir == root { + return "" + } + + // Update the sync path as we navigate up the directory tree. + path = filepath.Join(filepath.Base(root), path) + + // Move up the directory tree. + root = dir + } + + return filepath.Clean(root) +} + +func (m *syncInferRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + var diags diag.Diagnostics + + // Use the bundle root path as the starting point for inferring the sync root path. + bundleRootPath := filepath.Clean(b.RootPath) + + // Infer the sync root path by looking at each one of the sync paths. + // Every sync path must be a descendant of the final sync root path. + syncRootPath := bundleRootPath + for _, path := range b.Config.Sync.Paths { + computedPath := m.computeRoot(path, bundleRootPath) + if computedPath == "" { + continue + } + + // Update sync root path if the computed root path is an ancestor of the current sync root path. + if len(computedPath) < len(syncRootPath) { + syncRootPath = computedPath + } + } + + // The new sync root path can only be an ancestor of the previous root path. + // Compute the relative path from the sync root to the bundle root. + rel, err := filepath.Rel(syncRootPath, bundleRootPath) + if err != nil { + return diag.FromErr(err) + } + + // If during computation of the sync root path we hit the root of the filesystem, + // then one or more of the sync paths are outside the filesystem. + // Check if this happened by verifying that none of the paths escape the root + // when joined with the sync root path. + for i, path := range b.Config.Sync.Paths { + if filepath.IsLocal(filepath.Join(rel, path)) { + continue + } + + diags = append(diags, diag.Diagnostic{ + Severity: diag.Error, + Summary: fmt.Sprintf("invalid sync path %q", path), + Locations: b.Config.GetLocations(fmt.Sprintf("sync.paths[%d]", i)), + Paths: []dyn.Path{dyn.NewPath(dyn.Key("sync"), dyn.Key("paths"), dyn.Index(i))}, + }) + } + + if diags.HasError() { + return diags + } + + // Update all paths in the sync configuration to be relative to the sync root. + for i, p := range b.Config.Sync.Paths { + b.Config.Sync.Paths[i] = filepath.Join(rel, p) + } + for i, p := range b.Config.Sync.Include { + b.Config.Sync.Include[i] = filepath.Join(rel, p) + } + for i, p := range b.Config.Sync.Exclude { + b.Config.Sync.Exclude[i] = filepath.Join(rel, p) + } + + // Configure the sync root path. 
+ b.SyncRoot = vfs.MustNew(syncRootPath) + b.SyncRootPath = syncRootPath + return nil +} diff --git a/bundle/config/mutator/sync_infer_root_internal_test.go b/bundle/config/mutator/sync_infer_root_internal_test.go new file mode 100644 index 000000000..9ab9c88f4 --- /dev/null +++ b/bundle/config/mutator/sync_infer_root_internal_test.go @@ -0,0 +1,72 @@ +package mutator + +import ( + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestSyncInferRootInternal_ComputeRoot(t *testing.T) { + s := syncInferRoot{} + + tcases := []struct { + path string + root string + out string + }{ + { + // Test that "." doesn't change the root. + path: ".", + root: "/tmp/some/dir", + out: "/tmp/some/dir", + }, + { + // Test that a subdirectory doesn't change the root. + path: "sub", + root: "/tmp/some/dir", + out: "/tmp/some/dir", + }, + { + // Test that a parent directory changes the root. + path: "../common", + root: "/tmp/some/dir", + out: "/tmp/some", + }, + { + // Test that a deeply nested parent directory changes the root. + path: "../../../../../../common", + root: "/tmp/some/dir/that/is/very/deeply/nested", + out: "/tmp/some", + }, + { + // Test that a parent directory changes the root at the filesystem root boundary. + path: "../common", + root: "/tmp", + out: "/", + }, + { + // Test that an invalid parent directory doesn't change the root and returns an empty string. + path: "../common", + root: "/", + out: "", + }, + { + // Test that the returned path is cleaned even if the root doesn't change. + path: "sub", + root: "/tmp/some/../dir", + out: "/tmp/dir", + }, + { + // Test that a relative root path also works. + path: "../common", + root: "foo/bar", + out: "foo", + }, + } + + for _, tc := range tcases { + out := s.computeRoot(tc.path, tc.root) + assert.Equal(t, tc.out, filepath.ToSlash(out)) + } +} diff --git a/bundle/config/mutator/sync_infer_root_test.go b/bundle/config/mutator/sync_infer_root_test.go new file mode 100644 index 000000000..383e56769 --- /dev/null +++ b/bundle/config/mutator/sync_infer_root_test.go @@ -0,0 +1,198 @@ +package mutator_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSyncInferRoot_NominalAbsolute(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + ".", + }, + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.FromSlash("/tmp/some/dir"), b.SyncRootPath) + + // Check that the paths are unchanged. 
+ assert.Equal(t, []string{"."}, b.Config.Sync.Paths) + assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include) + assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude) +} + +func TestSyncInferRoot_NominalRelative(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "./some/dir", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + ".", + }, + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.FromSlash("some/dir"), b.SyncRootPath) + + // Check that the paths are unchanged. + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) + assert.Equal(t, []string{"foo", "bar"}, b.Config.Sync.Include) + assert.Equal(t, []string{"baz", "qux"}, b.Config.Sync.Exclude) +} + +func TestSyncInferRoot_ParentDirectory(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + "../common", + }, + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath) + + // Check that the paths are updated. + assert.Equal(t, []string{"common"}, b.Config.Sync.Paths) + assert.Equal(t, []string{filepath.FromSlash("dir/foo"), filepath.FromSlash("dir/bar")}, b.Config.Sync.Include) + assert.Equal(t, []string{filepath.FromSlash("dir/baz"), filepath.FromSlash("dir/qux")}, b.Config.Sync.Exclude) +} + +func TestSyncInferRoot_ManyParentDirectories(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir/that/is/very/deeply/nested", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + "../../../../../../common", + }, + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath) + + // Check that the paths are updated. + assert.Equal(t, []string{"common"}, b.Config.Sync.Paths) + assert.Equal(t, []string{ + filepath.FromSlash("dir/that/is/very/deeply/nested/foo"), + filepath.FromSlash("dir/that/is/very/deeply/nested/bar"), + }, b.Config.Sync.Include) + assert.Equal(t, []string{ + filepath.FromSlash("dir/that/is/very/deeply/nested/baz"), + filepath.FromSlash("dir/that/is/very/deeply/nested/qux"), + }, b.Config.Sync.Exclude) +} + +func TestSyncInferRoot_MultiplePaths(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/bundle/root", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + "./foo", + "../common", + "./bar", + "../../baz", + }, + }, + }, + } + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + assert.NoError(t, diags.Error()) + assert.Equal(t, filepath.FromSlash("/tmp/some"), b.SyncRootPath) + + // Check that the paths are updated. 
+ assert.Equal(t, filepath.FromSlash("bundle/root/foo"), b.Config.Sync.Paths[0]) + assert.Equal(t, filepath.FromSlash("bundle/common"), b.Config.Sync.Paths[1]) + assert.Equal(t, filepath.FromSlash("bundle/root/bar"), b.Config.Sync.Paths[2]) + assert.Equal(t, filepath.FromSlash("baz"), b.Config.Sync.Paths[3]) +} + +func TestSyncInferRoot_Error(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/some/dir", + Config: config.Root{ + Sync: config.Sync{ + Paths: []string{ + "../../../../error", + "../../../thisworks", + "../../../../../error", + }, + }, + }, + } + + bundletest.SetLocation(b, "sync.paths", "databricks.yml") + + ctx := context.Background() + diags := bundle.Apply(ctx, b, mutator.SyncInferRoot()) + require.Len(t, diags, 2) + assert.Equal(t, `invalid sync path "../../../../error"`, diags[0].Summary) + assert.Equal(t, "databricks.yml:0:0", diags[0].Locations[0].String()) + assert.Equal(t, "sync.paths[0]", diags[0].Paths[0].String()) + assert.Equal(t, `invalid sync path "../../../../../error"`, diags[1].Summary) + assert.Equal(t, "databricks.yml:0:0", diags[1].Locations[0].String()) + assert.Equal(t, "sync.paths[2]", diags[1].Paths[0].String()) +} diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go index dde9a299e..dcca50149 100644 --- a/bundle/config/mutator/trampoline.go +++ b/bundle/config/mutator/trampoline.go @@ -82,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund return err } - internalDirRel, err := filepath.Rel(b.RootPath, internalDir) + internalDirRel, err := filepath.Rel(b.SyncRootPath, internalDir) if err != nil { return err } diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index de395c165..08d3c8220 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -56,8 +56,12 @@ func TestGenerateTrampoline(t *testing.T) { } b := &bundle.Bundle{ - RootPath: tmpDir, + RootPath: filepath.Join(tmpDir, "parent", "my_bundle"), + SyncRootPath: filepath.Join(tmpDir, "parent"), Config: config.Root{ + Workspace: config.Workspace{ + FilePath: "/Workspace/files", + }, Bundle: config.Bundle{ Target: "development", }, @@ -89,6 +93,6 @@ func TestGenerateTrampoline(t *testing.T) { require.Equal(t, "Hello from Trampoline", string(bytes)) task := b.Config.Resources.Jobs["test"].Tasks[0] - require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline") + require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_test_to_trampoline", task.NotebookTask.NotebookPath) require.Nil(t, task.PythonWheelTask) } diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 28f7d3d30..5f22570e7 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -93,14 +93,14 @@ func (t *translateContext) rewritePath( return nil } - // Local path must be contained in the bundle root. + // Local path must be contained in the sync root. // If it isn't, it won't be synchronized into the workspace. 
- localRelPath, err := filepath.Rel(t.b.RootPath, localPath) + localRelPath, err := filepath.Rel(t.b.SyncRootPath, localPath) if err != nil { return err } if strings.HasPrefix(localRelPath, "..") { - return fmt.Errorf("path %s is not contained in bundle root path", localPath) + return fmt.Errorf("path %s is not contained in sync root path", localPath) } // Prefix remote path with its remote root path. @@ -118,7 +118,7 @@ func (t *translateContext) rewritePath( } func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) } @@ -134,7 +134,7 @@ func (t *translateContext) translateNotebookPath(literal, localFullPath, localRe } func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + nb, _, err := notebook.DetectWithFS(t.b.SyncRoot, filepath.ToSlash(localRelPath)) if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } @@ -148,7 +148,7 @@ func (t *translateContext) translateFilePath(literal, localFullPath, localRelPat } func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath)) + info, err := t.b.SyncRoot.Stat(filepath.ToSlash(localRelPath)) if err != nil { return "", err } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index fd64593be..50fcd3b07 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -41,8 +41,8 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -113,8 +113,8 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "requirements.txt")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -289,8 +289,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -380,12 +380,12 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { ) } -func TestTranslatePathsOutsideBundleRoot(t *testing.T) { +func TestTranslatePathsOutsideSyncRoot(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -411,15 +411,15 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml")) diags := 
bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, diags.Error(), "is not contained in bundle root") + assert.ErrorContains(t, diags.Error(), "is not contained in sync root path") } func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -449,8 +449,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -480,8 +480,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -511,8 +511,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -543,8 +543,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -578,8 +578,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -613,8 +613,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -648,8 +648,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -684,8 +684,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "env2.py")) b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -724,8 +724,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) { func TestTranslatePathWithComplexVariables(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, - BundleRoot: vfs.MustNew(dir), + SyncRootPath: dir, + SyncRoot: vfs.MustNew(dir), Config: config.Root{ Variables: map[string]*variable.Variable{ "cluster_libraries": { diff --git a/bundle/config/sync.go b/bundle/config/sync.go index 0580e4c4f..377b1333e 100644 --- a/bundle/config/sync.go +++ 
b/bundle/config/sync.go @@ -1,6 +1,10 @@ package config type Sync struct { + // Paths contains a list of paths to synchronize relative to the bundle root path. + // If not configured, this defaults to synchronizing everything in the bundle root path (i.e. `.`). + Paths []string `json:"paths,omitempty"` + // Include contains a list of globs evaluated relative to the bundle root path // to explicitly include files that were excluded by the user's gitignore. Include []string `json:"include,omitempty"` diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index dc45053f9..347ed3079 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -28,8 +28,8 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp } opts := &sync.SyncOptions{ - LocalRoot: rb.BundleRoot(), - Paths: []string{"."}, + LocalRoot: rb.SyncRoot(), + Paths: rb.Config().Sync.Paths, Include: includes, Exclude: rb.Config().Sync.Exclude, diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 24ed9d360..5e301a6f3 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } log.Infof(ctx, "Creating new snapshot") - snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts) + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.SyncRoot), opts) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index 38f0b4021..f75193065 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -64,6 +64,10 @@ func testStatePull(t *testing.T, opts statePullOpts) { b := &bundle.Bundle{ RootPath: tmpDir, BundleRoot: vfs.MustNew(tmpDir), + + SyncRootPath: tmpDir, + SyncRoot: vfs.MustNew(tmpDir), + Config: config.Root{ Bundle: config.Bundle{ Target: "default", @@ -81,11 +85,11 @@ func testStatePull(t *testing.T, opts statePullOpts) { ctx := context.Background() for _, file := range opts.localFiles { - testutil.Touch(t, b.RootPath, "bar", file) + testutil.Touch(t, b.SyncRootPath, "bar", file) } for _, file := range opts.localNotebooks { - testutil.TouchNotebook(t, b.RootPath, "bar", file) + testutil.TouchNotebook(t, b.SyncRootPath, "bar", file) } if opts.withExistingSnapshot { diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 7a1081ded..8039a4f13 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -21,7 +21,18 @@ func Initialize() bundle.Mutator { "initialize", []bundle.Mutator{ validate.AllResourcesHaveValues(), + + // Update all path fields in the sync block to be relative to the bundle root path. mutator.RewriteSyncPaths(), + + // Configure the default sync path to equal the bundle root if not explicitly configured. + // By default, this means all files in the bundle root directory are synchronized. + mutator.SyncDefaultPath(), + + // Figure out if the sync root path is identical or an ancestor of the bundle root path. + // If it is an ancestor, this updates all paths to be relative to the sync root path. 
+ mutator.SyncInferRoot(), + mutator.MergeJobClusters(), mutator.MergeJobParameters(), mutator.MergeJobTasks(), diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go index 677970d70..1d397f7a7 100644 --- a/bundle/python/conditional_transform_test.go +++ b/bundle/python/conditional_transform_test.go @@ -2,7 +2,6 @@ package python import ( "context" - "path" "path/filepath" "testing" @@ -18,11 +17,15 @@ func TestNoTransformByDefault(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ - RootPath: tmpDir, + RootPath: filepath.Join(tmpDir, "parent", "my_bundle"), + SyncRootPath: filepath.Join(tmpDir, "parent"), Config: config.Root{ Bundle: config.Bundle{ Target: "development", }, + Workspace: config.Workspace{ + FilePath: "/Workspace/files", + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": { @@ -63,11 +66,15 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ - RootPath: tmpDir, + RootPath: filepath.Join(tmpDir, "parent", "my_bundle"), + SyncRootPath: filepath.Join(tmpDir, "parent"), Config: config.Root{ Bundle: config.Bundle{ Target: "development", }, + Workspace: config.Workspace{ + FilePath: "/Workspace/files", + }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job1": { @@ -102,14 +109,7 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { task := b.Config.Resources.Jobs["job1"].Tasks[0] require.Nil(t, task.PythonWheelTask) require.NotNil(t, task.NotebookTask) - - dir, err := b.InternalDir(context.Background()) - require.NoError(t, err) - - internalDirRel, err := filepath.Rel(b.RootPath, dir) - require.NoError(t, err) - - require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath) + require.Equal(t, "/Workspace/files/my_bundle/.databricks/bundle/development/.internal/notebook_job1_key1", task.NotebookTask.NotebookPath) require.Len(t, task.Libraries, 1) require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.jar", task.Libraries[0].Jar) diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index 848132a13..5c48d81cb 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -40,6 +40,8 @@ func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) { diags := bundle.Apply(ctx, b, bundle.Seq( phases.LoadNamedTarget(env), mutator.RewriteSyncPaths(), + mutator.SyncDefaultPath(), + mutator.SyncInferRoot(), mutator.MergeJobClusters(), mutator.MergeJobParameters(), mutator.MergeJobTasks(), diff --git a/bundle/tests/sync/paths/databricks.yml b/bundle/tests/sync/paths/databricks.yml new file mode 100644 index 000000000..9ef6fa032 --- /dev/null +++ b/bundle/tests/sync/paths/databricks.yml @@ -0,0 +1,20 @@ +bundle: + name: sync_paths + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + paths: + - src + +targets: + development: + sync: + paths: + - development + + staging: + sync: + paths: + - staging diff --git a/bundle/tests/sync/paths_no_root/databricks.yml b/bundle/tests/sync/paths_no_root/databricks.yml new file mode 100644 index 000000000..df15b12b6 --- /dev/null +++ b/bundle/tests/sync/paths_no_root/databricks.yml @@ -0,0 +1,26 @@ +bundle: + name: sync_paths + +workspace: + host: https://acme.cloud.databricks.com/ + +targets: + development: + sync: + paths: + - development + + staging: + sync: + paths: + - staging + + undefined: ~ + + nil: + sync: + paths: ~ + + empty: + sync: + paths: [] diff --git 
a/bundle/tests/sync/shared_code/bundle/databricks.yml b/bundle/tests/sync/shared_code/bundle/databricks.yml new file mode 100644 index 000000000..738b6170c --- /dev/null +++ b/bundle/tests/sync/shared_code/bundle/databricks.yml @@ -0,0 +1,10 @@ +bundle: + name: shared_code + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + paths: + - "../common" + - "." diff --git a/bundle/tests/sync/shared_code/common/library.txt b/bundle/tests/sync/shared_code/common/library.txt new file mode 100644 index 000000000..83b323843 --- /dev/null +++ b/bundle/tests/sync/shared_code/common/library.txt @@ -0,0 +1 @@ +Placeholder for files to be deployed as part of multiple bundles. diff --git a/bundle/tests/sync_test.go b/bundle/tests/sync_test.go index d08e889c3..15644b67e 100644 --- a/bundle/tests/sync_test.go +++ b/bundle/tests/sync_test.go @@ -12,14 +12,20 @@ func TestSyncOverride(t *testing.T) { var b *bundle.Bundle b = loadTarget(t, "./sync/override", "development") + assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/override", "staging") + assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/override", "prod") + assert.Equal(t, filepath.FromSlash("sync/override"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) } @@ -28,14 +34,20 @@ func TestSyncOverrideNoRootSync(t *testing.T) { var b *bundle.Bundle b = loadTarget(t, "./sync/override_no_root", "development") + assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/override_no_root", "staging") + assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/override_no_root", "prod") + assert.Equal(t, filepath.FromSlash("sync/override_no_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) } @@ -44,10 +56,14 @@ func TestSyncNil(t *testing.T) { var b *bundle.Bundle b = loadTarget(t, "./sync/nil", "development") + assert.Equal(t, filepath.FromSlash("sync/nil"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.Nil(t, b.Config.Sync.Include) assert.Nil(t, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/nil", "staging") + assert.Equal(t, filepath.FromSlash("sync/nil"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) 
assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) } @@ -56,10 +72,59 @@ func TestSyncNilRoot(t *testing.T) { var b *bundle.Bundle b = loadTarget(t, "./sync/nil_root", "development") + assert.Equal(t, filepath.FromSlash("sync/nil_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.Nil(t, b.Config.Sync.Include) assert.Nil(t, b.Config.Sync.Exclude) b = loadTarget(t, "./sync/nil_root", "staging") + assert.Equal(t, filepath.FromSlash("sync/nil_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) } + +func TestSyncPaths(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/paths", "development") + assert.Equal(t, filepath.FromSlash("sync/paths"), b.SyncRootPath) + assert.Equal(t, []string{"src", "development"}, b.Config.Sync.Paths) + + b = loadTarget(t, "./sync/paths", "staging") + assert.Equal(t, filepath.FromSlash("sync/paths"), b.SyncRootPath) + assert.Equal(t, []string{"src", "staging"}, b.Config.Sync.Paths) +} + +func TestSyncPathsNoRoot(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/paths_no_root", "development") + assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) + assert.ElementsMatch(t, []string{"development"}, b.Config.Sync.Paths) + + b = loadTarget(t, "./sync/paths_no_root", "staging") + assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) + assert.ElementsMatch(t, []string{"staging"}, b.Config.Sync.Paths) + + // If not set at all, it defaults to "." + b = loadTarget(t, "./sync/paths_no_root", "undefined") + assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) + assert.Equal(t, []string{"."}, b.Config.Sync.Paths) + + // If set to nil, it won't sync anything. + b = loadTarget(t, "./sync/paths_no_root", "nil") + assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) + assert.Len(t, b.Config.Sync.Paths, 0) + + // If set to an empty sequence, it won't sync anything. + b = loadTarget(t, "./sync/paths_no_root", "empty") + assert.Equal(t, filepath.FromSlash("sync/paths_no_root"), b.SyncRootPath) + assert.Len(t, b.Config.Sync.Paths, 0) +} + +func TestSyncSharedCode(t *testing.T) { + b := loadTarget(t, "./sync/shared_code/bundle", "default") + assert.Equal(t, filepath.FromSlash("sync/shared_code"), b.SyncRootPath) + assert.ElementsMatch(t, []string{"common", "bundle"}, b.Config.Sync.Paths) +} diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 0d0c57385..bd03eec91 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -17,8 +17,10 @@ import ( func TestSyncOptionsFromBundle(t *testing.T) { tempDir := t.TempDir() b := &bundle.Bundle{ - RootPath: tempDir, - BundleRoot: vfs.MustNew(tempDir), + RootPath: tempDir, + BundleRoot: vfs.MustNew(tempDir), + SyncRootPath: tempDir, + SyncRoot: vfs.MustNew(tempDir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", From 35e48be81c634e50164edcfb086c362e948ca57e Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Thu, 22 Aug 2024 10:44:22 +0200 Subject: [PATCH 31/36] [Release] Release v0.227.0 (#1705) CLI: * Added filtering flags for cluster list commands ([#1703](https://github.com/databricks/cli/pull/1703)). 
Bundles: * Remove reference to "dbt" in the default-sql template ([#1696](https://github.com/databricks/cli/pull/1696)). * Pause continuous pipelines when 'mode: development' is used ([#1590](https://github.com/databricks/cli/pull/1590)). * Add configurable presets for name prefixes, tags, etc. ([#1490](https://github.com/databricks/cli/pull/1490)). * Report all empty resources present in error diagnostic ([#1685](https://github.com/databricks/cli/pull/1685)). * Improves detection of PyPI package names in environment dependencies ([#1699](https://github.com/databricks/cli/pull/1699)). * [DAB] Add support for requirements libraries in Job Tasks ([#1543](https://github.com/databricks/cli/pull/1543)). * Add paths field to bundle sync configuration ([#1694](https://github.com/databricks/cli/pull/1694)). Internal: * Add `import` option for PyDABs ([#1693](https://github.com/databricks/cli/pull/1693)). * Make fileset take optional list of paths to list ([#1684](https://github.com/databricks/cli/pull/1684)). * Pass through paths argument to libs/sync ([#1689](https://github.com/databricks/cli/pull/1689)). * Correctly mark package names with versions as remote libraries ([#1697](https://github.com/databricks/cli/pull/1697)). * Share test initializer in common helper function ([#1695](https://github.com/databricks/cli/pull/1695)). * Make `pydabs/venv_path` optional ([#1687](https://github.com/databricks/cli/pull/1687)). * Use API mocks for duplicate path errors in workspace files extensions client ([#1690](https://github.com/databricks/cli/pull/1690)). * Fix prefix preset used for UC schemas ([#1704](https://github.com/databricks/cli/pull/1704)). --- CHANGELOG.md | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 39960e308..88a62d098 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,31 @@ # Version changelog +## [Release] Release v0.227.0 + +CLI: + * Added filtering flags for cluster list commands ([#1703](https://github.com/databricks/cli/pull/1703)). + +Bundles: + * Allow users to configure paths (including outside of the bundle root) to synchronize to the workspace. ([#1694](https://github.com/databricks/cli/pull/1694)). + * Add configurable presets for name prefixes, tags, etc. ([#1490](https://github.com/databricks/cli/pull/1490)). + * Add support for requirements libraries in Job Tasks ([#1543](https://github.com/databricks/cli/pull/1543)). + * Remove reference to "dbt" in the default-sql template ([#1696](https://github.com/databricks/cli/pull/1696)). + * Pause continuous pipelines when 'mode: development' is used ([#1590](https://github.com/databricks/cli/pull/1590)). + * Report all empty resources present in error diagnostic ([#1685](https://github.com/databricks/cli/pull/1685)). + * Improves detection of PyPI package names in environment dependencies ([#1699](https://github.com/databricks/cli/pull/1699)). + +Internal: + * Add `import` option for PyDABs ([#1693](https://github.com/databricks/cli/pull/1693)). + * Make fileset take optional list of paths to list ([#1684](https://github.com/databricks/cli/pull/1684)). + * Pass through paths argument to libs/sync ([#1689](https://github.com/databricks/cli/pull/1689)). + * Correctly mark package names with versions as remote libraries ([#1697](https://github.com/databricks/cli/pull/1697)). + * Share test initializer in common helper function ([#1695](https://github.com/databricks/cli/pull/1695)). 
+ * Make `pydabs/venv_path` optional ([#1687](https://github.com/databricks/cli/pull/1687)). + * Use API mocks for duplicate path errors in workspace files extensions client ([#1690](https://github.com/databricks/cli/pull/1690)). + * Fix prefix preset used for UC schemas ([#1704](https://github.com/databricks/cli/pull/1704)). + + + ## [Release] Release v0.226.0 CLI: From 7fe08c2386edfa503985d93b1e6f633aa85e4f74 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 22 Aug 2024 20:34:26 +0530 Subject: [PATCH 32/36] Revert hc-install version to 0.7.0 (#1711) ## Changes With hc-install version `0.8.0` there was a regression where debug logs would be leaked into stderr. Reported upstream in https://github.com/hashicorp/hc-install/issues/239. Meanwhile we need to revert and pin to version`0.7.0`. This PR also includes a regression test. ## Tests Regression test. --- go.mod | 3 +-- go.sum | 8 ++------ internal/bundle/deploy_test.go | 31 +++++++++++++++++++++++++++++++ internal/bundle/helpers.go | 29 +++++++++++++++++++++++++++++ 4 files changed, 63 insertions(+), 8 deletions(-) diff --git a/go.mod b/go.mod index 1457a4d67..838a45f36 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause github.com/hashicorp/go-version v1.7.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.8.0 // MPL 2.0 + github.com/hashicorp/hc-install v0.7.0 // MPL 2.0 github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause @@ -49,7 +49,6 @@ require ( github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - github.com/hashicorp/go-retryablehttp v0.7.7 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect diff --git a/go.sum b/go.sum index b2985955c..f55f329f3 100644 --- a/go.sum +++ b/go.sum @@ -99,14 +99,10 @@ github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-hclog v1.6.3 h1:Qr2kF+eVWjTiYmU7Y31tYlP1h0q/X3Nl3tPGdaB11/k= -github.com/hashicorp/go-hclog v1.6.3/go.mod h1:W4Qnvbt70Wk/zYJryRzDRU/4r0kIg0PVHBcfoyhpF5M= -github.com/hashicorp/go-retryablehttp v0.7.7 h1:C8hUCYzor8PIfXHa4UrZkU4VvK8o9ISHxT2Q8+VepXU= -github.com/hashicorp/go-retryablehttp v0.7.7/go.mod h1:pkQpWZeYWskR+D1tR2O5OcBFOxfA7DoAO6xtkuQnHTk= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.8.0 h1:LdpZeXkZYMQhoKPCecJHlKvUkQFixN/nvyR1CdfOLjI= -github.com/hashicorp/hc-install v0.8.0/go.mod h1:+MwJYjDfCruSD/udvBmRB22Nlkwwkwf5sAB6uTIhSaU= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= github.com/hashicorp/terraform-exec v0.21.0 
h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= diff --git a/internal/bundle/deploy_test.go b/internal/bundle/deploy_test.go index 3da885705..269b7c80a 100644 --- a/internal/bundle/deploy_test.go +++ b/internal/bundle/deploy_test.go @@ -13,6 +13,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal" "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -123,3 +124,33 @@ func TestAccBundleDeployUcSchemaFailsWithoutAutoApprove(t *testing.T) { assert.EqualError(t, err, root.ErrAlreadyPrinted.Error()) assert.Contains(t, stdout.String(), "the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed") } + +func TestAccDeployBasicBundleLogs(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + uniqueId := uuid.New().String() + root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, ctx, root) + require.NoError(t, err) + }) + + currentUser, err := wt.W.CurrentUser.Me(ctx) + require.NoError(t, err) + + stdout, stderr := blackBoxRun(t, root, "bundle", "deploy") + assert.Equal(t, strings.Join([]string{ + fmt.Sprintf("Uploading bundle files to /Users/%s/.bundle/%s/files...", currentUser.UserName, uniqueId), + "Deploying resources...", + "Updating deployment state...", + "Deployment complete!\n", + }, "\n"), stderr) + assert.Equal(t, "", stdout) +} diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 03d9cff70..3547c1755 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -1,10 +1,12 @@ package bundle import ( + "bytes" "context" "encoding/json" "fmt" "os" + "os/exec" "path/filepath" "strings" "testing" @@ -15,6 +17,7 @@ import ( "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/template" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/require" ) @@ -114,3 +117,29 @@ func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, unique root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) return root } + +func blackBoxRun(t *testing.T, root string, args ...string) (stdout string, stderr string) { + cwd := vfs.MustNew(".") + gitRoot, err := vfs.FindLeafInTree(cwd, ".git") + require.NoError(t, err) + + t.Setenv("BUNDLE_ROOT", root) + + // Create the command + cmd := exec.Command("go", append([]string{"run", "main.go"}, args...)...) 
+ cmd.Dir = gitRoot.Native() + + // Create buffers to capture output + var outBuffer, errBuffer bytes.Buffer + cmd.Stdout = &outBuffer + cmd.Stderr = &errBuffer + + // Run the command + err = cmd.Run() + require.NoError(t, err) + + // Get the output + stdout = outBuffer.String() + stderr = errBuffer.String() + return +} From 84b47745e451f6552465243665ad6c897c55ae5e Mon Sep 17 00:00:00 2001 From: "Lennart Kats (databricks)" Date: Fri, 23 Aug 2024 12:13:21 +0200 Subject: [PATCH 33/36] Ignore CLI version check on development builds of the CLI (#1714) ## Changes This change makes sure we ignore the CLI version check on development builds of the CLI. Before: ``` $ cat databricks.yml | grep cli_version databricks_cli_version: ">= 0.223.1" $ cli bundle deploy Error: Databricks CLI version constraint not satisfied. Required: >= 0.223.1, current: 0.0.0-dev+06b169284737 ``` After: ``` ... $ cli bundle deploy ... Warning: Ignoring Databricks CLI version constraint for development build. Required: >= 0.223.1, current: 0.0.0-dev+d52d6f08fcd5 ``` ## Tests --- bundle/config/mutator/verify_cli_version.go | 4 ++++ bundle/config/mutator/verify_cli_version_test.go | 7 ++++++- 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go index 9c32fcc9d..279af44e6 100644 --- a/bundle/config/mutator/verify_cli_version.go +++ b/bundle/config/mutator/verify_cli_version.go @@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia } if !c.Check(version) { + if version.Prerelease() == "dev" && version.Major() == 0 { + return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion) + } + return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion) } diff --git a/bundle/config/mutator/verify_cli_version_test.go b/bundle/config/mutator/verify_cli_version_test.go index 24f656745..025461292 100644 --- a/bundle/config/mutator/verify_cli_version_test.go +++ b/bundle/config/mutator/verify_cli_version_test.go @@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) { constraint: "^0.100", expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", }, + { + currentVersion: "0.0.0-dev+06b169284737", + constraint: ">= 0.100.0", + expectedError: "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0", + }, } t.Cleanup(func() { @@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) { diags := bundle.Apply(context.Background(), b, VerifyCliVersion()) if tc.expectedError != "" { require.NotEmpty(t, diags) - require.Equal(t, tc.expectedError, diags.Error().Error()) + require.Contains(t, diags[0].Summary, tc.expectedError) } else { require.Empty(t, diags) } From 783e05c939a694fe722e52ddea9c48f0ea077181 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Mon, 26 Aug 2024 12:03:56 +0200 Subject: [PATCH 34/36] Do not treat empty path as a local path (#1717) ## Changes Fixes an issue introduced in https://github.com/databricks/cli/pull/1699 where PyPi packages were treated as local libraries. The reason is that `libraryPath` returned an empty string as the path for PyPi packages, and then `IsLibraryLocal` treated the empty string as a local path. Both of these functions are fixed in this PR.
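For illustration, a minimal sketch of the intended behavior after this change. The test name is hypothetical and not part of this PR (the actual regression tests are in the diff below), and it assumes it compiles inside the `bundle/libraries` package, since `libraryPath` and `IsLibraryLocal` are unexported helpers:

```go
package libraries

import (
	"testing"

	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/stretchr/testify/assert"
)

// Hypothetical sketch: a PyPI-only library no longer counts as a local library.
func TestPypiLibraryIsNotLocal(t *testing.T) {
	lib := &compute.Library{
		Pypi: &compute.PythonPyPiLibrary{Package: "requests==2.25.1"},
	}

	// libraryPath now returns an error for library types that carry no path,
	// instead of returning an empty string.
	p, err := libraryPath(lib)
	assert.Error(t, err)
	assert.Equal(t, "", p)

	// IsLibraryLocal rejects the empty string, so the library is not treated as local.
	assert.False(t, IsLibraryLocal(p))
}
```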
## Tests Added regression test --- bundle/libraries/helpers.go | 19 +++++++---- bundle/libraries/helpers_test.go | 28 +++++++++++++--- bundle/libraries/libraries.go | 7 +++- bundle/libraries/local_path.go | 4 +++ bundle/libraries/local_path_test.go | 1 + bundle/libraries/workspace_path.go | 4 +-- bundle/python/warning_test.go | 51 +++++++++++++++++++++++++++++ 7 files changed, 99 insertions(+), 15 deletions(-) diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go index b7e707ccf..2149e5885 100644 --- a/bundle/libraries/helpers.go +++ b/bundle/libraries/helpers.go @@ -1,19 +1,24 @@ package libraries -import "github.com/databricks/databricks-sdk-go/service/compute" +import ( + "fmt" -func libraryPath(library *compute.Library) string { + "github.com/databricks/databricks-sdk-go/service/compute" +) + +func libraryPath(library *compute.Library) (string, error) { if library.Whl != "" { - return library.Whl + return library.Whl, nil } if library.Jar != "" { - return library.Jar + return library.Jar, nil } if library.Egg != "" { - return library.Egg + return library.Egg, nil } if library.Requirements != "" { - return library.Requirements + return library.Requirements, nil } - return "" + + return "", fmt.Errorf("not supported library type") } diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go index e4bd32770..9d7e12ee5 100644 --- a/bundle/libraries/helpers_test.go +++ b/bundle/libraries/helpers_test.go @@ -10,9 +10,27 @@ import ( func TestLibraryPath(t *testing.T) { path := "/some/path" - assert.Equal(t, path, libraryPath(&compute.Library{Whl: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Jar: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Egg: path})) - assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path})) - assert.Equal(t, "", libraryPath(&compute.Library{})) + p, err := libraryPath(&compute.Library{Whl: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Jar: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Egg: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{Requirements: path}) + assert.Equal(t, path, p) + assert.Nil(t, err) + + p, err = libraryPath(&compute.Library{}) + assert.Equal(t, "", p) + assert.NotNil(t, err) + + p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}}) + assert.Equal(t, "", p) + assert.NotNil(t, err) } diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 33b848dd9..f75e23a8c 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task { func isTaskWithLocalLibraries(task jobs.Task) bool { for _, l := range task.Libraries { - if IsLibraryLocal(libraryPath(&l)) { + p, err := libraryPath(&l) + // If there's an error, skip the library because it's not of supported type + if err != nil { + continue + } + if IsLibraryLocal(p) { return true } } diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go index 417bce10e..e49562405 100644 --- a/bundle/libraries/local_path.go +++ b/bundle/libraries/local_path.go @@ -43,6 +43,10 @@ func IsLocalPath(p string) bool { // We can't use IsLocalPath beacuse environment dependencies can be // a pypi package name which can be misinterpreted as a local path by IsLocalPath. 
func IsLibraryLocal(dep string) bool { + if dep == "" { + return false + } + possiblePrefixes := []string{ ".", } diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go index 7f84b3244..667d64ec8 100644 --- a/bundle/libraries/local_path_test.go +++ b/bundle/libraries/local_path_test.go @@ -48,6 +48,7 @@ func TestIsLibraryLocal(t *testing.T) { {path: "../../local/*.whl", expected: true}, {path: "..\\..\\local\\*.whl", expected: true}, {path: "file://path/to/package/whl.whl", expected: true}, + {path: "", expected: false}, {path: "pypipackage", expected: false}, {path: "/Volumes/catalog/schema/volume/path.whl", expected: false}, {path: "/Workspace/my_project/dist.whl", expected: false}, diff --git a/bundle/libraries/workspace_path.go b/bundle/libraries/workspace_path.go index b08ca1616..126ad3f13 100644 --- a/bundle/libraries/workspace_path.go +++ b/bundle/libraries/workspace_path.go @@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool { // IsWorkspaceLibrary returns true if the specified library refers to a workspace path. func IsWorkspaceLibrary(library *compute.Library) bool { - path := libraryPath(library) - if path == "" { + path, err := libraryPath(library) + if err != nil { return false } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index dd6397f78..b2296392b 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { {Whl: "./dist/test.whl"}, }, }, + { + TaskKey: "key7", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"}, + {Pypi: &compute.PythonPyPiLibrary{ + Package: "requests==2.25.1", + }}, + }, + }, }, }, }, @@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) } +func TestTasksWithPyPiPackageAreCompatible(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "cluster1", + NewCluster: compute.ClusterSpec{ + SparkVersion: "12.2.x-scala2.12", + }, + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "key1", + PythonWheelTask: &jobs.PythonWheelTask{}, + ExistingClusterId: "test-key-2", + Libraries: []compute.Library{ + {Pypi: &compute.PythonPyPiLibrary{ + Package: "requests==2.25.1", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + require.False(t, hasIncompatibleWheelTasks(context.Background(), b)) +} + func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ From 056d2032368ead1e3a7e65f9304508498bc53403 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 27 Aug 2024 10:54:05 +0200 Subject: [PATCH 35/36] Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 (#1719) Bumps [github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go) from 0.44.0 to 0.45.0.
Release notes

Sourced from github.com/databricks/databricks-sdk-go's releases.

v0.45.0

0.45.0

Bug Fixes

  • Add INVALID_STATE to error code mapping (#1014).
  • Do not specify --tenant flag when fetching managed identity access token from the CLI (#1021).

Internal Changes

  • Add terraform aliases to Entity (#1017).
  • Added Service.NamedIdMap (#1016).
  • Fix billing test for budget configuration update (#1019).

API Changes:

OpenAPI SHA: 3eae49b444cac5a0118a3503e5b7ecef7f96527a, Date: 2024-08-21

Changelog

Sourced from github.com/databricks/databricks-sdk-go's changelog.

[Release] Release v0.45.0

Bug Fixes

  • Add INVALID_STATE to error code mapping (#1014).
  • Do not specify --tenant flag when fetching managed identity access token from the CLI (#1021).

Internal Changes

  • Add terraform aliases to Entity (#1017).
  • Added Service.NamedIdMap (#1016).
  • Fix billing test for budget configuration update (#1019).

API Changes:

OpenAPI SHA: 3eae49b444cac5a0118a3503e5b7ecef7f96527a, Date: 2024-08-21

Commits
  • 6d86788 [Release] Release v0.45.0 (#1023)
  • ba4489b [Fix] Do not specify --tenant flag when fetching managed identity access to...
  • f624809 [Internal] Fix billing test for budget configuration update (#1019)
  • 27a5055 [Internal] Add terraform aliases to Entity (#1017)
  • 382a38d [Internal] Added Service.NamedIdMap (#1016)
  • 1ef9931 [Fix] Add INVALID_STATE to error code mapping (#1014)
  • See full diff in compare view

Most Recent Ignore Conditions Applied to This Pull Request

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
[![Dependabot compatibility score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.44.0&new-version=0.45.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores) Dependabot will resolve any conflicts with this PR as long as you don't alter it yourself. You can also trigger a rebase manually by commenting `@dependabot rebase`. [//]: # (dependabot-automerge-start) [//]: # (dependabot-automerge-end) ---
Dependabot commands and options
You can trigger Dependabot actions by commenting on this PR: - `@dependabot rebase` will rebase this PR - `@dependabot recreate` will recreate this PR, overwriting any edits that have been made to it - `@dependabot merge` will merge this PR after your CI passes on it - `@dependabot squash and merge` will squash and merge this PR after your CI passes on it - `@dependabot cancel merge` will cancel a previously requested merge and block automerging - `@dependabot reopen` will reopen this PR if it is closed - `@dependabot close` will close this PR and stop Dependabot recreating it. You can achieve the same result by closing it manually - `@dependabot show ignore conditions` will show all of the ignore conditions of the specified dependency - `@dependabot ignore this major version` will close this PR and stop Dependabot creating any more for this major version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this minor version` will close this PR and stop Dependabot creating any more for this minor version (unless you reopen the PR or upgrade to it yourself) - `@dependabot ignore this dependency` will close this PR and stop Dependabot creating any more for this dependency (unless you reopen the PR or upgrade to it yourself)
--------- Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> Co-authored-by: Andrew Nester --- .codegen/_openapi_sha | 2 +- .gitattributes | 3 + bundle/schema/docs/bundle_descriptions.json | 64 +++++ cmd/workspace/cmd.go | 6 + .../external-locations/external-locations.go | 2 + .../policy-compliance-for-clusters.go | 260 +++++++++++++++++ .../policy-compliance-for-jobs.go | 262 ++++++++++++++++++ cmd/workspace/query-history/query-history.go | 8 +- .../resource-quotas/resource-quotas.go | 168 +++++++++++ go.mod | 2 +- go.sum | 4 +- 11 files changed, 773 insertions(+), 8 deletions(-) create mode 100755 cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go create mode 100755 cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go create mode 100755 cmd/workspace/resource-quotas/resource-quotas.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index fef6f268b..8b01a2422 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -f98c07f9c71f579de65d2587bb0292f83d10e55d \ No newline at end of file +3eae49b444cac5a0118a3503e5b7ecef7f96527a \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index bdb3f3982..d82ab7696 100755 --- a/.gitattributes +++ b/.gitattributes @@ -75,6 +75,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true cmd/workspace/permission-migration/permission-migration.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true +cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true +cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true @@ -94,6 +96,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr cmd/workspace/recipients/recipients.go linguist-generated=true cmd/workspace/registered-models/registered-models.go linguist-generated=true cmd/workspace/repos/repos.go linguist-generated=true +cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index d888b3663..908a1c2ba 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -85,6 +85,12 @@ "enabled": { "description": "" }, + "import": { + "description": "", + "items": { + "description": "" + } + }, "venv_path": { "description": "" } @@ -130,6 +136,29 @@ } } }, + "presets": { + "description": "", + "properties": { + "jobs_max_concurrent_runs": { + "description": "" + }, + "name_prefix": { + "description": "" + }, + "pipelines_development": { + "description": "" + }, + "tags": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "trigger_pause_status": { + "description": "" + } + } + }, "resources": { "description": "Collection of Databricks resources to deploy.", "properties": { @@ -3079,6 +3108,12 @@ 
"items": { "description": "" } + }, + "paths": { + "description": "", + "items": { + "description": "" + } } } }, @@ -3202,6 +3237,29 @@ } } }, + "presets": { + "description": "", + "properties": { + "jobs_max_concurrent_runs": { + "description": "" + }, + "name_prefix": { + "description": "" + }, + "pipelines_development": { + "description": "" + }, + "tags": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "trigger_pause_status": { + "description": "" + } + } + }, "resources": { "description": "Collection of Databricks resources to deploy.", "properties": { @@ -6151,6 +6209,12 @@ "items": { "description": "" } + }, + "paths": { + "description": "", + "items": { + "description": "" + } } } }, diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index 75664c79c..11be8077a 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -44,6 +44,8 @@ import ( permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" + policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters" + policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters" provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges" @@ -63,6 +65,7 @@ import ( recipients "github.com/databricks/cli/cmd/workspace/recipients" registered_models "github.com/databricks/cli/cmd/workspace/registered-models" repos "github.com/databricks/cli/cmd/workspace/repos" + resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas" schemas "github.com/databricks/cli/cmd/workspace/schemas" secrets "github.com/databricks/cli/cmd/workspace/secrets" service_principals "github.com/databricks/cli/cmd/workspace/service-principals" @@ -130,6 +133,8 @@ func All() []*cobra.Command { out = append(out, permission_migration.New()) out = append(out, permissions.New()) out = append(out, pipelines.New()) + out = append(out, policy_compliance_for_clusters.New()) + out = append(out, policy_compliance_for_jobs.New()) out = append(out, policy_families.New()) out = append(out, provider_exchange_filters.New()) out = append(out, provider_exchanges.New()) @@ -149,6 +154,7 @@ func All() []*cobra.Command { out = append(out, recipients.New()) out = append(out, registered_models.New()) out = append(out, repos.New()) + out = append(out, resource_quotas.New()) out = append(out, schemas.New()) out = append(out, secrets.New()) out = append(out, service_principals.New()) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index 8f0dd346a..42493fc46 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -75,6 +75,7 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accesing s3 for this external location.`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) // TODO: complex arg: encryption_details + cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback 
mode is enabled for this external location.`) cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`) @@ -347,6 +348,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details + cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`) cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) diff --git a/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go new file mode 100755 index 000000000..1274c8790 --- /dev/null +++ b/cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go @@ -0,0 +1,260 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package policy_compliance_for_clusters + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "policy-compliance-for-clusters", + Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`, + Long: `The policy compliance APIs allow you to view and manage the policy compliance + status of clusters in your workspace. + + A cluster is compliant with its policy if its configuration satisfies all its + policy rules. Clusters could be out of compliance if their policy was updated + after the cluster was last edited. + + The get and list compliance APIs allow you to view the policy compliance + status of a cluster. The enforce compliance API allows you to update a cluster + to be compliant with the current version of its policy.`, + GroupID: "compute", + Annotations: map[string]string{ + "package": "compute", + }, + } + + // Add methods + cmd.AddCommand(newEnforceCompliance()) + cmd.AddCommand(newGetCompliance()) + cmd.AddCommand(newListCompliance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start enforce-compliance command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var enforceComplianceOverrides []func( + *cobra.Command, + *compute.EnforceClusterComplianceRequest, +) + +func newEnforceCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var enforceComplianceReq compute.EnforceClusterComplianceRequest + var enforceComplianceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`) + + cmd.Use = "enforce-compliance CLUSTER_ID" + cmd.Short = `Enforce cluster policy compliance.` + cmd.Long = `Enforce cluster policy compliance. + + Updates a cluster to be compliant with the current version of its policy. A + cluster can be updated if it is in a RUNNING or TERMINATED state. + + If a cluster is updated while in a RUNNING state, it will be restarted so + that the new attributes can take effect. + + If a cluster is updated while in a TERMINATED state, it will remain + TERMINATED. The next time the cluster is started, the new attributes will + take effect. + + Clusters created by the Databricks Jobs, DLT, or Models services cannot be + enforced by this API. Instead, use the "Enforce job policy compliance" API to + enforce policy compliance on jobs. + + Arguments: + CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + enforceComplianceReq.ClusterId = args[0] + } + + response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range enforceComplianceOverrides { + fn(cmd, &enforceComplianceReq) + } + + return cmd +} + +// start get-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getComplianceOverrides []func( + *cobra.Command, + *compute.GetClusterComplianceRequest, +) + +func newGetCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var getComplianceReq compute.GetClusterComplianceRequest + + // TODO: short flags + + cmd.Use = "get-compliance CLUSTER_ID" + cmd.Short = `Get cluster policy compliance.` + cmd.Long = `Get cluster policy compliance. 
+ + Returns the policy compliance status of a cluster. Clusters could be out of + compliance if their policy was updated after the cluster was last edited. + + Arguments: + CLUSTER_ID: The ID of the cluster to get the compliance status` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getComplianceReq.ClusterId = args[0] + + response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getComplianceOverrides { + fn(cmd, &getComplianceReq) + } + + return cmd +} + +// start list-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listComplianceOverrides []func( + *cobra.Command, + *compute.ListClusterCompliancesRequest, +) + +func newListCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var listComplianceReq compute.ListClusterCompliancesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`) + cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`) + + cmd.Use = "list-compliance POLICY_ID" + cmd.Short = `List cluster policy compliance.` + cmd.Long = `List cluster policy compliance. + + Returns the policy compliance status of all clusters that use a given policy. + Clusters could be out of compliance if their policy was updated after the + cluster was last edited. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listComplianceReq.PolicyId = args[0] + + response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listComplianceOverrides { + fn(cmd, &listComplianceReq) + } + + return cmd +} + +// end service PolicyComplianceForClusters diff --git a/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go new file mode 100755 index 000000000..d74caa572 --- /dev/null +++ b/cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go @@ -0,0 +1,262 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package policy_compliance_for_jobs + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "policy-compliance-for-jobs", + Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`, + Long: `The compliance APIs allow you to view and manage the policy compliance status + of jobs in your workspace. This API currently only supports compliance + controls for cluster policies. + + A job is in compliance if its cluster configurations satisfy the rules of all + their respective cluster policies. A job could be out of compliance if a + cluster policy it uses was updated after the job was last edited. The job is + considered out of compliance if any of its clusters no longer comply with + their updated policies. + + The get and list compliance APIs allow you to view the policy compliance + status of a job. The enforce compliance API allows you to update a job so that + it becomes compliant with all of its policies.`, + GroupID: "jobs", + Annotations: map[string]string{ + "package": "jobs", + }, + } + + // Add methods + cmd.AddCommand(newEnforceCompliance()) + cmd.AddCommand(newGetCompliance()) + cmd.AddCommand(newListCompliance()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start enforce-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var enforceComplianceOverrides []func( + *cobra.Command, + *jobs.EnforcePolicyComplianceRequest, +) + +func newEnforceCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var enforceComplianceReq jobs.EnforcePolicyComplianceRequest + var enforceComplianceJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`) + + cmd.Use = "enforce-compliance JOB_ID" + cmd.Short = `Enforce job policy compliance.` + cmd.Long = `Enforce job policy compliance. + + Updates a job so the job clusters that are created when running the job + (specified in new_cluster) are compliant with the current versions of their + respective cluster policies. All-purpose clusters used in the job will not be + updated. 
+ + Arguments: + JOB_ID: The ID of the job you want to enforce policy compliance on.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = enforceComplianceJson.Unmarshal(&enforceComplianceReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId) + if err != nil { + return fmt.Errorf("invalid JOB_ID: %s", args[0]) + } + } + + response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range enforceComplianceOverrides { + fn(cmd, &enforceComplianceReq) + } + + return cmd +} + +// start get-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getComplianceOverrides []func( + *cobra.Command, + *jobs.GetPolicyComplianceRequest, +) + +func newGetCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var getComplianceReq jobs.GetPolicyComplianceRequest + + // TODO: short flags + + cmd.Use = "get-compliance JOB_ID" + cmd.Short = `Get job policy compliance.` + cmd.Long = `Get job policy compliance. + + Returns the policy compliance status of a job. Jobs could be out of compliance + if a cluster policy they use was updated after the job was last edited and + some of its job clusters no longer comply with their updated policies. + + Arguments: + JOB_ID: The ID of the job whose compliance status you are requesting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + _, err = fmt.Sscan(args[0], &getComplianceReq.JobId) + if err != nil { + return fmt.Errorf("invalid JOB_ID: %s", args[0]) + } + + response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getComplianceOverrides { + fn(cmd, &getComplianceReq) + } + + return cmd +} + +// start list-compliance command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listComplianceOverrides []func( + *cobra.Command, + *jobs.ListJobComplianceRequest, +) + +func newListCompliance() *cobra.Command { + cmd := &cobra.Command{} + + var listComplianceReq jobs.ListJobComplianceRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`) + cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`) + + cmd.Use = "list-compliance POLICY_ID" + cmd.Short = `List job policy compliance.` + cmd.Long = `List job policy compliance. + + Returns the policy compliance status of all jobs that use a given policy. Jobs + could be out of compliance if a cluster policy they use was updated after the + job was last edited and its job clusters no longer comply with the updated + policy. + + Arguments: + POLICY_ID: Canonical unique identifier for the cluster policy.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listComplianceReq.PolicyId = args[0] + + response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listComplianceOverrides { + fn(cmd, &listComplianceReq) + } + + return cmd +} + +// end service PolicyComplianceForJobs diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 5155b5cc0..bfa013f28 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "query-history", - Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`, + Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`, Long: `A service responsible for storing and retrieving the list of queries run - against SQL endpoints, serverless compute, and DLT.`, + against SQL endpoints and serverless compute.`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -53,6 +53,7 @@ func newList() *cobra.Command { // TODO: short flags // TODO: complex arg: filter_by + cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`) @@ -60,8 +61,7 @@ func newList() *cobra.Command { cmd.Short = `List Queries.` cmd.Long = `List Queries. 
- List the history of queries through SQL warehouses, serverless compute, and - DLT. + List the history of queries through SQL warehouses, and serverless compute. You can filter by user ID, warehouse ID, status, and time range. Most recently started queries are returned first (up to max_results in request). The diff --git a/cmd/workspace/resource-quotas/resource-quotas.go b/cmd/workspace/resource-quotas/resource-quotas.go new file mode 100755 index 000000000..9a0c30687 --- /dev/null +++ b/cmd/workspace/resource-quotas/resource-quotas.go @@ -0,0 +1,168 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package resource_quotas + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "resource-quotas", + Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`, + Long: `Unity Catalog enforces resource quotas on all securable objects, which limits + the number of resources that can be created. Quotas are expressed in terms of + a resource type and a parent (for example, tables per metastore or schemas per + catalog). The resource quota APIs enable you to monitor your current usage and + limits. For more information on resource quotas see the [Unity Catalog + documentation]. + + [Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Add methods + cmd.AddCommand(newGetQuota()) + cmd.AddCommand(newListQuotas()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get-quota command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getQuotaOverrides []func( + *cobra.Command, + *catalog.GetQuotaRequest, +) + +func newGetQuota() *cobra.Command { + cmd := &cobra.Command{} + + var getQuotaReq catalog.GetQuotaRequest + + // TODO: short flags + + cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME" + cmd.Short = `Get information for a single resource quota.` + cmd.Long = `Get information for a single resource quota. + + The GetQuota API returns usage information for a single resource quota, + defined as a child-parent pair. This API also refreshes the quota count if it + is out of date. Refreshes are triggered asynchronously. The updated count + might not be returned in the first call. + + Arguments: + PARENT_SECURABLE_TYPE: Securable type of the quota parent. + PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent + is a metastore. + QUOTA_NAME: Name of the quota. 
Follows the pattern of the quota type, with "-quota" + added as a suffix.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getQuotaReq.ParentSecurableType = args[0] + getQuotaReq.ParentFullName = args[1] + getQuotaReq.QuotaName = args[2] + + response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getQuotaOverrides { + fn(cmd, &getQuotaReq) + } + + return cmd +} + +// start list-quotas command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listQuotasOverrides []func( + *cobra.Command, + *catalog.ListQuotasRequest, +) + +func newListQuotas() *cobra.Command { + cmd := &cobra.Command{} + + var listQuotasReq catalog.ListQuotasRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`) + cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`) + + cmd.Use = "list-quotas" + cmd.Short = `List all resource quotas under a metastore.` + cmd.Long = `List all resource quotas under a metastore. + + ListQuotas returns all quota values under the metastore. There are no SLAs on + the freshness of the counts returned. This API does not trigger a refresh of + quota counts.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listQuotasOverrides { + fn(cmd, &listQuotasReq) + } + + return cmd +} + +// end service ResourceQuotas diff --git a/go.mod b/go.mod index 838a45f36..4aa279921 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,7 @@ go 1.22 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT github.com/briandowns/spinner v1.23.1 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause diff --git a/go.sum b/go.sum index f55f329f3..2e58948aa 100644 --- a/go.sum +++ b/go.sum @@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s= -github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= +github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo= +github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= From edc08149d3d6a4943072f23492fce494082edf90 Mon Sep 17 00:00:00 2001 From: Andrew Nester Date: Tue, 27 Aug 2024 14:51:10 +0200 Subject: [PATCH 36/36] Disable prompt for storage-credentials get command (#1723) ## Changes Fixes #1079 --- .codegen/service.go.tmpl | 1 + .../storage-credentials.go | 22 +++++-------------- 2 files changed, 6 insertions(+), 17 deletions(-) diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index 111745e4f..281dfd6eb 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -154,6 +154,7 @@ func new{{.PascalName}}() *cobra.Command { "provider-exchanges delete-listing-from-exchange" "provider-exchanges list-exchanges-for-listing" "provider-exchanges list-listings-for-exchange" + "storage-credentials get" -}} {{- $fullCommandName := (print $serviceName " " .KebabName) -}} {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 18656a61c..f4ec5eb4f 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -241,28 +241,16 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME argument specified. Loading names for Storage Credentials drop-down." 
- names, err := w.StorageCredentials.StorageCredentialInfoNameToIdMap(ctx, catalog.ListStorageCredentialsRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Storage Credentials drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "Name of the storage credential") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have name of the storage credential") - } getReq.Name = args[0] response, err := w.StorageCredentials.Get(ctx, getReq)