diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8c62ac620..c4b47ca14 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 \ No newline at end of file +7437dabb9dadee402c1fc060df4c1ce8cc5369f0 \ No newline at end of file diff --git a/.codegen/cmds-account.go.tmpl b/.codegen/cmds-account.go.tmpl index 24b6bdd7c..43834b698 100644 --- a/.codegen/cmds-account.go.tmpl +++ b/.codegen/cmds-account.go.tmpl @@ -7,7 +7,7 @@ package account import ( "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" - {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}} ) @@ -17,7 +17,7 @@ func New() *cobra.Command { Short: `Databricks Account Commands`, } - {{range .Services}}{{if and .IsAccounts (not .HasParent)}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} cmd.AddCommand({{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/cmds-workspace.go.tmpl b/.codegen/cmds-workspace.go.tmpl index 244dde61a..e29f05a55 100644 --- a/.codegen/cmds-workspace.go.tmpl +++ b/.codegen/cmds-workspace.go.tmpl @@ -14,14 +14,14 @@ package workspace import ( "github.com/databricks/cli/cmd/root" - {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}} ) func All() []*cobra.Command { var out []*cobra.Command - {{range .Services}}{{if and (not .IsAccounts) (not .HasParent)}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} out = append(out, {{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index ad482ebe6..111745e4f 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -22,6 +22,7 @@ import ( "dbsql-permissions" "account-access-control-proxy" "files" + "serving-endpoints-data-plane" }} {{if not (in $excludes .KebabName) }} diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 244bdeee5..08edfb9da 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -33,7 +33,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x - name: Setup Python uses: actions/setup-python@v5 @@ -68,7 +68,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # No need to download cached dependencies when running gofmt. 
cache: false @@ -100,7 +100,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # Github repo: https://github.com/ajv-validator/ajv-cli - name: Install ajv-cli diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index bd89417e2..defd1c535 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -21,33 +21,41 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x + + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml - name: Hide snapshot tag to outsmart GoReleaser run: git tag -d snapshot || true - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 + id: releaser + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: ~> v2 args: release --snapshot --skip docker - name: Upload macOS binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_darwin_snapshot path: | dist/*_darwin_*/ - name: Upload Linux binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_linux_snapshot path: | dist/*_linux_*/ - name: Upload Windows binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_windows_snapshot path: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 8643ac355..531fb39bf 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,7 +22,14 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x + + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml # Log into the GitHub Container Registry. The goreleaser action will create # the docker images and push them to the GitHub Container Registry. @@ -39,9 +46,9 @@ jobs: - name: Run GoReleaser id: releaser - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: ~> v2 args: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index d37876edb..3f0bdb2c5 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,9 @@ +version: 2 + before: hooks: - - go mod tidy + - go mod download + builds: - env: - CGO_ENABLED=0 @@ -36,6 +39,7 @@ builds: - amd64 - arm64 binary: databricks + archives: - format: zip @@ -89,8 +93,10 @@ docker_manifests: checksum: name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS' algorithm: sha256 + snapshot: name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}' + changelog: sort: asc filters: diff --git a/CHANGELOG.md b/CHANGELOG.md index 2fb35d479..16d81f822 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,127 @@ # Version changelog +## 0.223.1 + +This bugfix release fixes missing error messages in v0.223.0. + +CLI: + * Fix logic error in [#1532](https://github.com/databricks/cli/pull/1532) ([#1564](https://github.com/databricks/cli/pull/1564)). 
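The snapshot `name_template` in `.goreleaser.yaml` above is a Go text/template. As a rough, hypothetical illustration of how it renders (the `incpatch` helper below is a stand-in reimplementation of GoReleaser's built-in template function of the same name, and the `Version`/`ShortCommit` fields mimic GoReleaser's template context):

```go
package main

import (
	"os"
	"strconv"
	"strings"
	"text/template"
)

// incpatch mimics GoReleaser's built-in template function: it bumps the
// patch component of a semantic version ("0.223.1" -> "0.223.2").
func incpatch(v string) string {
	parts := strings.Split(v, ".")
	patch, _ := strconv.Atoi(parts[len(parts)-1])
	parts[len(parts)-1] = strconv.Itoa(patch + 1)
	return strings.Join(parts, ".")
}

func main() {
	tmpl := template.Must(template.New("snapshot").
		Funcs(template.FuncMap{"incpatch": incpatch}).
		Parse("{{ incpatch .Version }}-dev+{{ .ShortCommit }}"))
	data := struct{ Version, ShortCommit string }{"0.223.1", "abc1234"}
	_ = tmpl.Execute(os.Stdout, data) // prints "0.223.2-dev+abc1234"
}
```

This is why a snapshot built from, say, v0.223.1 is labeled 0.223.2-dev+&lt;short commit SHA&gt;: the next patch version plus the commit it was built from.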
+ + +## 0.223.0 + +Bundles: + +As of this release you can interact with bundles when running the CLI on DBR (e.g. via the Web Terminal). + + * Fix non-default project names not working in dbt-sql template ([#1500](https://github.com/databricks/cli/pull/1500)). + * Improve `bundle validate` output ([#1532](https://github.com/databricks/cli/pull/1532)). + * Fixed resolving variable references inside slice variable ([#1550](https://github.com/databricks/cli/pull/1550)). + * Fixed bundle not loading when empty variable is defined ([#1552](https://github.com/databricks/cli/pull/1552)). + * Use `vfs.Path` for filesystem interaction ([#1554](https://github.com/databricks/cli/pull/1554)). + * Replace `vfs.Path` with extension-aware filer when running on DBR ([#1556](https://github.com/databricks/cli/pull/1556)). + +Internal: + * merge.Override: Fix handling of dyn.NilValue ([#1530](https://github.com/databricks/cli/pull/1530)). + * Compare `.Kind()` instead of direct equality checks on a `dyn.Value` ([#1520](https://github.com/databricks/cli/pull/1520)). + * PythonMutator: register product in user agent extra ([#1533](https://github.com/databricks/cli/pull/1533)). + * Ignore `dyn.NilValue` when traversing value from `dyn.Map` ([#1547](https://github.com/databricks/cli/pull/1547)). + * Add extra tests for the sync block ([#1548](https://github.com/databricks/cli/pull/1548)). + * PythonMutator: add diagnostics ([#1531](https://github.com/databricks/cli/pull/1531)). + * PythonMutator: support omitempty in PyDABs ([#1513](https://github.com/databricks/cli/pull/1513)). + * PythonMutator: allow insert 'resources' and 'resources.jobs' ([#1555](https://github.com/databricks/cli/pull/1555)). + +## 0.222.0 + +CLI: + * Add link to documentation for Homebrew installation to README ([#1505](https://github.com/databricks/cli/pull/1505)). + * Fix `databricks configure` to use `DATABRICKS_CONFIG_FILE` environment variable if exists as config file ([#1325](https://github.com/databricks/cli/pull/1325)). + +Bundles: + +The Terraform upgrade to v1.48.0 includes a fix for library order not being respected. + + * Fix conditional in query in `default-sql` template ([#1479](https://github.com/databricks/cli/pull/1479)). + * Remove user credentials specified in the Git origin URL ([#1494](https://github.com/databricks/cli/pull/1494)). + * Serialize dynamic value for `bundle validate` output ([#1499](https://github.com/databricks/cli/pull/1499)). + * Override variables with lookup value even if values has default value set ([#1504](https://github.com/databricks/cli/pull/1504)). + * Pause quality monitors when "mode: development" is used ([#1481](https://github.com/databricks/cli/pull/1481)). + * Return `fs.ModeDir` for Git folders in the workspace ([#1521](https://github.com/databricks/cli/pull/1521)). + * Upgrade TF provider to 1.48.0 ([#1527](https://github.com/databricks/cli/pull/1527)). + * Added support for complex variables ([#1467](https://github.com/databricks/cli/pull/1467)). + +Internal: + * Add randIntn function ([#1475](https://github.com/databricks/cli/pull/1475)). + * Avoid multiple file tree traversals on bundle deploy ([#1493](https://github.com/databricks/cli/pull/1493)). + * Clean up unused code ([#1502](https://github.com/databricks/cli/pull/1502)). + * Use `dyn.InvalidValue` to indicate absence ([#1507](https://github.com/databricks/cli/pull/1507)). + * Add ApplyPythonMutator ([#1430](https://github.com/databricks/cli/pull/1430)). 
+ * Set bool pointer to disable lock ([#1516](https://github.com/databricks/cli/pull/1516)). + * Allow the any type to be set to nil in `convert.FromTyped` ([#1518](https://github.com/databricks/cli/pull/1518)). + * Properly deal with nil values in `convert.FromTyped` ([#1511](https://github.com/databricks/cli/pull/1511)). + * Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen ([#1514](https://github.com/databricks/cli/pull/1514)). + * PythonMutator: replace stdin/stdout with files ([#1512](https://github.com/databricks/cli/pull/1512)). + * Add context type and value to path rewriting ([#1525](https://github.com/databricks/cli/pull/1525)). + +API Changes: + * Added schedule CRUD commands to `databricks lakeview`. + * Added subscription CRUD commands to `databricks lakeview`. + * Added `databricks apps start` command. + +OpenAPI commit 7437dabb9dadee402c1fc060df4c1ce8cc5369f0 (2024-06-24) + +Dependency updates: + * Bump golang.org/x/text from 0.15.0 to 0.16.0 ([#1482](https://github.com/databricks/cli/pull/1482)). + * Bump golang.org/x/term from 0.20.0 to 0.21.0 ([#1483](https://github.com/databricks/cli/pull/1483)). + * Bump golang.org/x/mod from 0.17.0 to 0.18.0 ([#1484](https://github.com/databricks/cli/pull/1484)). + * Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 ([#1485](https://github.com/databricks/cli/pull/1485)). + * Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 ([#1495](https://github.com/databricks/cli/pull/1495)). + * Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 ([#1496](https://github.com/databricks/cli/pull/1496)). + * Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 ([#1522](https://github.com/databricks/cli/pull/1522)). + +## 0.221.1 + +Bundles: + +This release fixes an issue introduced in v0.221.0 where managing jobs with a single-node cluster would fail. + + * Fix SQL schema selection in default-sql template ([#1471](https://github.com/databricks/cli/pull/1471)). + * Copy-editing for SQL templates ([#1474](https://github.com/databricks/cli/pull/1474)). + * Upgrade TF provider to 1.47.0 ([#1476](https://github.com/databricks/cli/pull/1476)). + +Internal: + * Use latest version of goreleaser action ([#1477](https://github.com/databricks/cli/pull/1477)). + + + +## 0.221.0 + +CLI: + * Update OpenAPI spec ([#1466](https://github.com/databricks/cli/pull/1466)). + +Bundles: + * Upgrade TF provider to 1.46.0 ([#1460](https://github.com/databricks/cli/pull/1460)). + * Add support for Lakehouse monitoring ([#1307](https://github.com/databricks/cli/pull/1307)). + * Make dbt-sql and default-sql templates public ([#1463](https://github.com/databricks/cli/pull/1463)). + +Internal: + * Abstract over filesystem interaction with libs/vfs ([#1452](https://github.com/databricks/cli/pull/1452)). + * Add `filer.Filer` to read notebooks from WSFS without omitting their extension ([#1457](https://github.com/databricks/cli/pull/1457)). + * Fix listing notebooks in a subdirectory ([#1468](https://github.com/databricks/cli/pull/1468)). + +API Changes: + * Changed `databricks account storage-credentials list` command to return . + * Added `databricks consumer-listings batch-get` command. + * Added `databricks consumer-providers batch-get` command. + * Removed `databricks apps create-deployment` command. + * Added `databricks apps deploy` command.
+ +OpenAPI commit 37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 (2024-06-03) + +Dependency updates: + * Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 ([#1454](https://github.com/databricks/cli/pull/1454)). + * Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 ([#1453](https://github.com/databricks/cli/pull/1453)). + ## 0.220.0 CLI: diff --git a/README.md b/README.md index 5f3b78b79..51780d0f9 100644 --- a/README.md +++ b/README.md @@ -4,18 +4,21 @@ This project is in Public Preview. -Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md). - Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html. ## Installation This CLI is packaged as a dependency-free binary executable and may be located in any directory. See https://github.com/databricks/cli/releases for releases and -[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for -installation instructions. +the [Databricks documentation](https://docs.databricks.com/en/dev-tools/cli/install.html) for detailed information about installing the CLI. ------ +### Homebrew + +We maintain a [Homebrew tap](https://github.com/databricks/homebrew-tap) for installing the Databricks CLI. You can find instructions for how to install, upgrade and downgrade the CLI using Homebrew [here](https://github.com/databricks/homebrew-tap/blob/main/README.md). + +------ +### Docker You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions at: https://github.com/databricks/cli/pkgs/container/cli. ``` diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 470c329a1..a5f41ae4b 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" @@ -135,36 +136,57 @@ func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, u remotePath := path.Join(wsfsBase, f.RemotePath) for _, job := range b.Config.Resources.Jobs { - for i := range job.Tasks { - task := &job.Tasks[i] - for j := range task.Libraries { - lib := &task.Libraries[j] - if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { - lib.Whl = remotePath - } - if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { - lib.Jar = remotePath - } - } + rewriteArtifactPath(b, f, job, remotePath) + + } + } + + return nil +} + +func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) { + // Rewrite artifact path in job task libraries + for i := range job.Tasks { + task := &job.Tasks[i] + for j := range task.Libraries { + lib := &task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath + } + } - for i := range job.Environments { - env := &job.Environments[i] - if env.Spec == nil { - continue + // Rewrite artifact path in job task libraries for ForEachTask + if task.ForEachTask != nil { + forEachTask := task.ForEachTask + for j := range forEachTask.Task.Libraries { + lib := &forEachTask.Task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath } - - 
for j := range env.Spec.Dependencies { - lib := env.Spec.Dependencies[j] - if isArtifactMatchLibrary(f, lib, b) { - env.Spec.Dependencies[j] = remotePath - } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath } } } } - return nil + // Rewrite artifact path in job environments + for i := range job.Environments { + env := &job.Environments[i] + if env.Spec == nil { + continue + } + + for j := range env.Spec.Dependencies { + lib := env.Spec.Dependencies[j] + if isArtifactMatchLibrary(f, lib, b) { + env.Spec.Dependencies[j] = remotePath + } + } + } } func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool { diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go index ca0e578bd..53c2798ed 100644 --- a/bundle/artifacts/artifacts_test.go +++ b/bundle/artifacts/artifacts_test.go @@ -52,6 +52,20 @@ func TestArtifactUpload(t *testing.T) { }, }, }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, }, Environments: []jobs.JobEnvironment{ { @@ -88,4 +102,6 @@ func TestArtifactUpload(t *testing.T) { require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) } diff --git a/bundle/bundle.go b/bundle/bundle.go index 1dc98656a..032d98abc 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -16,7 +16,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/libs/folders" + "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" @@ -35,6 +35,10 @@ type Bundle struct { // It is set when we instantiate a new bundle instance. RootPath string + // BundleRoot is a virtual filesystem path to the root of the bundle. + // Exclusively use this field for filesystem operations. + BundleRoot vfs.Path + Config config.Root // Metadata about the bundle deployment. This is the interface Databricks services @@ -50,6 +54,9 @@ type Bundle struct { clientOnce sync.Once client *databricks.WorkspaceClient + // Files that are synced to the workspace.file_path + Files []fileset.File + // Stores an initialized copy of this bundle's Terraform wrapper. 
Terraform *tfexec.Terraform @@ -69,7 +76,8 @@ type Bundle struct { func Load(ctx context.Context, path string) (*Bundle, error) { b := &Bundle{ - RootPath: filepath.Clean(path), + RootPath: filepath.Clean(path), + BundleRoot: vfs.MustNew(path), } configFile, err := config.FileNames.FindInPath(path) if err != nil { @@ -204,12 +212,12 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { } func (b *Bundle) GitRepository() (*git.Repository, error) { - rootPath, err := folders.FindDirWithLeaf(b.RootPath, ".git") + _, err := vfs.FindLeafInTree(b.BundleRoot, ".git") if err != nil { return nil, fmt.Errorf("unable to locate repository root: %w", err) } - return git.NewRepository(vfs.MustNew(rootPath)) + return git.NewRepository(b.BundleRoot) } // AuthEnv returns a map with environment variables and their values diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go index e4a4f9936..59084f2ac 100644 --- a/bundle/bundle_read_only.go +++ b/bundle/bundle_read_only.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" ) @@ -23,6 +24,10 @@ func (r ReadOnlyBundle) RootPath() string { return r.b.RootPath } +func (r ReadOnlyBundle) BundleRoot() vfs.Path { + return r.b.BundleRoot +} + func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { return r.b.WorkspaceClient() } diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 908b446e2..a29aa024b 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -2,6 +2,8 @@ package bundle import ( "context" + "errors" + "io/fs" "os" "path/filepath" "testing" @@ -14,7 +16,7 @@ import ( func TestLoadNotExists(t *testing.T) { b, err := Load(context.Background(), "/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) assert.Nil(t, b) } diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 008d7b909..12048a322 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -23,6 +23,22 @@ type Experimental struct { // be removed in the future once we have a proper workaround like allowing IS_OWNER // as a top-level permission in the DAB. UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"` + + // PyDABs determines whether to load the 'databricks-pydabs' package. + // + // PyDABs allows defining bundle configuration using Python. + PyDABs PyDABs `json:"pydabs,omitempty"` +} + +type PyDABs struct { + // Enabled is a flag to enable the feature. + Enabled bool `json:"enabled,omitempty"` + + // VEnvPath is the path to the virtual environment. + // + // Required if PyDABs is enabled. PyDABs will load the code in the specified + // environment.
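// For example, the tests for this feature later in this diff use
// "venv_path: .venv", i.e. a virtual environment named ".venv" created
// alongside the bundle configuration file.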
+ VEnvPath string `json:"venv_path,omitempty"` } type Command string diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 469f84228..3ab5e0122 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -17,7 +17,7 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { for _, task := range job.Settings.Tasks { v, err := convertTaskToValue(task, taskOrder) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } tasks = append(tasks, v) } diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go new file mode 100644 index 000000000..17af4828f --- /dev/null +++ b/bundle/config/mutator/configure_wsfs.go @@ -0,0 +1,50 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/vfs" +) + +const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION" + +type configureWSFS struct{} + +func ConfigureWSFS() bundle.Mutator { + return &configureWSFS{} +} + +func (m *configureWSFS) Name() string { + return "ConfigureWSFS" +} + +func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + root := b.BundleRoot.Native() + + // The bundle root must be located in /Workspace/ + if !strings.HasPrefix(root, "/Workspace/") { + return nil + } + + // The executable must be running on DBR. + if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok { + return nil + } + + // If so, swap out vfs.Path instance of the sync root with one that + // makes all Workspace File System interactions extension aware. + p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) { + return filer.NewWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path) + }) + if err != nil { + return diag.FromErr(err) + } + + b.BundleRoot = p + return nil +} diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go index cbedcaefd..fb898edea 100644 --- a/bundle/config/mutator/environments_compat.go +++ b/bundle/config/mutator/environments_compat.go @@ -32,18 +32,18 @@ func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) dia targets := v.Get("targets") // Return an error if both "environments" and "targets" are set. - if environments != dyn.NilValue && targets != dyn.NilValue { - return dyn.NilValue, fmt.Errorf( + if environments.Kind() != dyn.KindInvalid && targets.Kind() != dyn.KindInvalid { + return dyn.InvalidValue, fmt.Errorf( "both 'environments' and 'targets' are specified; only 'targets' should be used: %s", environments.Location().String(), ) } // Rewrite "environments" to "targets". - if environments != dyn.NilValue && targets == dyn.NilValue { + if environments.Kind() != dyn.KindInvalid && targets.Kind() == dyn.KindInvalid { nv, err := dyn.Set(v, "targets", environments) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } // Drop the "environments" key. 
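// Why Kind() checks instead of comparing against dyn.NilValue: the two
// sentinels encode different states. Get on a missing key returns
// dyn.InvalidValue (Kind() == dyn.KindInvalid), while a key that is present
// but left empty in YAML ("targets:") loads as a value with
// Kind() == dyn.KindNil. A sketch, assuming a config where "targets" is
// declared but empty and "environments" is not declared at all:
//
//	v.Get("targets")      // present but empty: Kind() == dyn.KindNil
//	v.Get("environments") // absent: Kind() == dyn.KindInvalid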
return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index d8b76f39e..9b1c963c9 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" - "github.com/databricks/cli/libs/vfs" ) type loadGitDetails struct{} @@ -23,7 +22,7 @@ func (m *loadGitDetails) Name() string { func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository - repo, err := git.NewRepository(vfs.MustNew(b.RootPath)) + repo, err := git.NewRepository(b.BundleRoot) if err != nil { return diag.FromErr(err) } diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go index 20f4efe85..aa131f287 100644 --- a/bundle/config/mutator/merge_job_clusters.go +++ b/bundle/config/mutator/merge_job_clusters.go @@ -21,7 +21,7 @@ func (m *mergeJobClusters) Name() string { func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: return "" case dyn.KindString: return v.MustString() @@ -32,7 +32,7 @@ func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go index 68c05383c..9498e8822 100644 --- a/bundle/config/mutator/merge_job_tasks.go +++ b/bundle/config/mutator/merge_job_tasks.go @@ -21,7 +21,7 @@ func (m *mergeJobTasks) Name() string { func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: return "" case dyn.KindString: return v.MustString() @@ -32,7 +32,7 @@ func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go index 0b1cf8983..52f3e6fa6 100644 --- a/bundle/config/mutator/merge_pipeline_clusters.go +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -22,7 +22,7 @@ func (m *mergePipelineClusters) Name() string { func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { switch v.Kind() { - case dyn.KindNil: + case dyn.KindInvalid, dyn.KindNil: // Note: the cluster label is optional and defaults to 'default'. // We therefore ALSO merge all clusters without a label. 
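// (Consequently, a cluster without a label and a cluster explicitly labeled
// "default" resolve to the same key and are merged into a single cluster.)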
return "default" @@ -35,7 +35,7 @@ func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - if v == dyn.NilValue { + if v.Kind() == dyn.KindNil { return v, nil } diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index ae0d7e5fb..52f85eeb8 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -4,6 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/loader" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/scripts" ) @@ -24,12 +25,6 @@ func DefaultMutators() []bundle.Mutator { InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad), } } - -func DefaultMutatorsForTarget(target string) []bundle.Mutator { - return append( - DefaultMutators(), - SelectTarget(target), - ) -} diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 8e70fab73..b50716fd6 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -9,8 +9,8 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/diag" - "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -33,10 +33,8 @@ func (m *processTargetMode) Name() string { func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() { log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true") - err := disableDeploymentLock(b) - if err != nil { - return diag.FromErr(err) - } + disabled := false + b.Config.Bundle.Deployment.Lock.Enabled = &disabled } r := b.Config.Resources @@ -105,15 +103,16 @@ func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagno // (registered models in Unity Catalog don't yet support tags) } - return nil -} + for i := range r.QualityMonitors { + // Remove all schedules from monitors, since they don't support pausing/unpausing. + // Quality monitors might support the "pause" property in the future, so at the + // CLI level we do respect that property if it is set to "unpaused". 
+ if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused { + r.QualityMonitors[i].Schedule = nil + } + } -func disableDeploymentLock(b *bundle.Bundle) error { - return b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { - return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { - return dyn.Set(v, "enabled", dyn.V(false)) - }) - }) + return nil } func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index cf8229bfe..03da64e77 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -99,6 +99,20 @@ func mockBundle(mode config.Mode) *bundle.Bundle { }, QualityMonitors: map[string]*resources.QualityMonitor{ "qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}}, + "qualityMonitor2": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor2", + Schedule: &catalog.MonitorCronSchedule{}, + }, + }, + "qualityMonitor3": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor3", + Schedule: &catalog.MonitorCronSchedule{ + PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused, + }, + }, + }, }, }, }, @@ -151,6 +165,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Quality Monitor 1 assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Nil(t, b.Config.Resources.QualityMonitors["qualityMonitor2"].Schedule) + assert.Equal(t, catalog.MonitorCronSchedulePauseStatusUnpaused, b.Config.Resources.QualityMonitors["qualityMonitor3"].Schedule.PauseStatus) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -314,7 +330,7 @@ func TestDisableLocking(t *testing.T) { ctx := context.Background() b := mockBundle(config.Development) - err := transformDevelopmentMode(ctx, b) + err := bundle.Apply(ctx, b, ProcessTargetMode()) require.Nil(t, err) assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled()) } @@ -325,7 +341,7 @@ func TestDisableLockingDisabled(t *testing.T) { explicitlyEnabled := true b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled - err := transformDevelopmentMode(ctx, b) + err := bundle.Apply(ctx, b, ProcessTargetMode()) require.Nil(t, err) assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled") } diff --git a/bundle/config/mutator/python/log_writer.go b/bundle/config/mutator/python/log_writer.go new file mode 100644 index 000000000..aa3db0571 --- /dev/null +++ b/bundle/config/mutator/python/log_writer.go @@ -0,0 +1,42 @@ +package python + +import ( + "bufio" + "bytes" + "context" + "io" + + "github.com/databricks/cli/libs/log" +) + +type logWriter struct { + ctx context.Context + prefix string + buf bytes.Buffer +} + +// newLogWriter creates a new io.Writer that writes to log with specified prefix. 
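// It is used to surface the Python subprocess's output in debug logs; see
// runPythonMutator later in this diff, abridged here:
//
//	stderrWriter := newLogWriter(ctx, "stderr: ")
//	stdoutWriter := newLogWriter(ctx, "stdout: ")
//	_, processErr := process.Background(ctx, args,
//		process.WithStderrWriter(stderrWriter),
//		process.WithStdoutWriter(stdoutWriter),
//	)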
+func newLogWriter(ctx context.Context, prefix string) io.Writer { + return &logWriter{ + ctx: ctx, + prefix: prefix, + } +} + +func (p *logWriter) Write(bytes []byte) (n int, err error) { + p.buf.Write(bytes) + + scanner := bufio.NewScanner(&p.buf) + + for scanner.Scan() { + line := scanner.Text() + + log.Debugf(p.ctx, "%s%s", p.prefix, line) + } + + remaining := p.buf.Bytes() + p.buf.Reset() + p.buf.Write(remaining) + + return len(bytes), nil +} diff --git a/bundle/config/mutator/python/python_diagnostics.go b/bundle/config/mutator/python/python_diagnostics.go new file mode 100644 index 000000000..b8efc9ef7 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics.go @@ -0,0 +1,97 @@ +package python + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type pythonDiagnostic struct { + Severity pythonSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + Location pythonDiagnosticLocation `json:"location,omitempty"` + Path string `json:"path,omitempty"` +} + +type pythonDiagnosticLocation struct { + File string `json:"file"` + Line int `json:"line"` + Column int `json:"column"` +} + +type pythonSeverity = string + +const ( + pythonError pythonSeverity = "error" + pythonWarning pythonSeverity = "warning" +) + +// parsePythonDiagnostics parses diagnostics from the Python mutator. +// +// diagnostics file is newline-separated JSON objects with pythonDiagnostic structure. +func parsePythonDiagnostics(input io.Reader) (diag.Diagnostics, error) { + diags := diag.Diagnostics{} + decoder := json.NewDecoder(input) + + for decoder.More() { + var parsedLine pythonDiagnostic + + err := decoder.Decode(&parsedLine) + if err != nil { + return nil, fmt.Errorf("failed to parse diags: %s", err) + } + + severity, err := convertPythonSeverity(parsedLine.Severity) + if err != nil { + return nil, fmt.Errorf("failed to parse severity: %s", err) + } + + path, err := convertPythonPath(parsedLine.Path) + if err != nil { + return nil, fmt.Errorf("failed to parse path: %s", err) + } + + diag := diag.Diagnostic{ + Severity: severity, + Summary: parsedLine.Summary, + Detail: parsedLine.Detail, + Location: convertPythonLocation(parsedLine.Location), + Path: path, + } + + diags = diags.Append(diag) + } + + return diags, nil +} + +func convertPythonPath(path string) (dyn.Path, error) { + if path == "" { + return nil, nil + } + + return dyn.NewPathFromString(path) +} + +func convertPythonSeverity(severity pythonSeverity) (diag.Severity, error) { + switch severity { + case pythonError: + return diag.Error, nil + case pythonWarning: + return diag.Warning, nil + default: + return 0, fmt.Errorf("unexpected value: %s", severity) + } +} + +func convertPythonLocation(location pythonDiagnosticLocation) dyn.Location { + return dyn.Location{ + File: location.File, + Line: location.Line, + Column: location.Column, + } +} diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go new file mode 100644 index 000000000..7b66e2537 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -0,0 +1,107 @@ +package python + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestConvertPythonLocation(t *testing.T) { + location := convertPythonLocation(pythonDiagnosticLocation{ + File: 
"src/examples/file.py", + Line: 1, + Column: 2, + }) + + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, location) +} + +type parsePythonDiagnosticsTest struct { + name string + input string + expected diag.Diagnostics +} + +func TestParsePythonDiagnostics(t *testing.T) { + + testCases := []parsePythonDiagnosticsTest{ + { + name: "short error with location", + input: `{"severity": "error", "summary": "error summary", "location": {"file": "src/examples/file.py", "line": 1, "column": 2}}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Location: dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, + }, + }, + }, + { + name: "short error with path", + input: `{"severity": "error", "summary": "error summary", "path": "resources.jobs.job0.name"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Path: dyn.MustPathFromString("resources.jobs.job0.name"), + }, + }, + }, + { + name: "empty file", + input: "", + expected: diag.Diagnostics{}, + }, + { + name: "newline file", + input: "\n", + expected: diag.Diagnostics{}, + }, + { + name: "warning with detail", + input: `{"severity": "warning", "summary": "warning summary", "detail": "warning detail"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "warning summary", + Detail: "warning detail", + }, + }, + }, + { + name: "multiple errors", + input: `{"severity": "error", "summary": "error summary (1)"}` + "\n" + + `{"severity": "error", "summary": "error summary (2)"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary (1)", + }, + { + Severity: diag.Error, + Summary: "error summary (2)", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + diags, err := parsePythonDiagnostics(bytes.NewReader([]byte(tc.input))) + + assert.NoError(t, err) + assert.Equal(t, tc.expected, diags) + }) + } +} diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go new file mode 100644 index 000000000..f9febe5b5 --- /dev/null +++ b/bundle/config/mutator/python/python_mutator.go @@ -0,0 +1,433 @@ +package python + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/databricks/databricks-sdk-go/logger" + + "github.com/databricks/cli/bundle/env" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" +) + +type phase string + +const ( + // PythonMutatorPhaseLoad is the phase in which bundle configuration is loaded. + // + // At this stage, PyDABs adds statically defined resources to the bundle configuration. + // Which resources are added should be deterministic and not depend on the bundle configuration. + // + // We also open for possibility of appending other sections of bundle configuration, + // for example, adding new variables. However, this is not supported yet, and CLI rejects + // such changes. + PythonMutatorPhaseLoad phase = "load" + + // PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and + // the list of statically declared resources is known. 
+ // + // At this stage, PyDABs adds resources defined using generators, or mutates existing resources, + // including the ones defined using YAML. + // + // During this process, within generator and mutators, PyDABs can access: + // - selected deployment target + // - bundle variables values + // - variables provided through CLI arguments or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // PyDABs can output YAML containing references to variables, and CLI should resolve them. + // + // Existing resources can't be removed, and CLI rejects such changes. + PythonMutatorPhaseInit phase = "init" +) + +type pythonMutator struct { + phase phase +} + +func PythonMutator(phase phase) bundle.Mutator { + return &pythonMutator{ + phase: phase, + } +} + +func (m *pythonMutator) Name() string { + return fmt.Sprintf("PythonMutator(%s)", m.phase) +} + +func getExperimental(b *bundle.Bundle) config.Experimental { + if b.Config.Experimental == nil { + return config.Experimental{} + } + + return *b.Config.Experimental +} + +func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + experimental := getExperimental(b) + + if !experimental.PyDABs.Enabled { + return nil + } + + if experimental.PyDABs.VEnvPath == "" { + return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set") + } + + // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' + var mutateDiags diag.Diagnostics + var mutateDiagsHasError = errors.New("unexpected error") + + err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { + pythonPath := interpreterPath(experimental.PyDABs.VEnvPath) + + if _, err := os.Stat(pythonPath); err != nil { + if os.IsNotExist(err) { + return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath) + } else { + return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err) + } + } + + cacheDir, err := createCacheDir(ctx) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err) + } + + rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot) + mutateDiags = diags + if diags.HasError() { + return dyn.InvalidValue, mutateDiagsHasError + } + + visitor, err := createOverrideVisitor(ctx, m.phase) + if err != nil { + return dyn.InvalidValue, err + } + + return merge.Override(leftRoot, rightRoot, visitor) + }) + + if err == mutateDiagsHasError { + if !mutateDiags.HasError() { + panic("mutateDiags has no error, but error is expected") + } + + return mutateDiags + } + + return mutateDiags.Extend(diag.FromErr(err)) +} + +func createCacheDir(ctx context.Context) (string, error) { + // b.CacheDir doesn't work because target isn't yet selected + + // support the same env variable as in b.CacheDir + if tempDir, exists := env.TempDir(ctx); exists { + // use 'default' as target name + cacheDir := filepath.Join(tempDir, "default", "pydabs") + + err := os.MkdirAll(cacheDir, 0700) + if err != nil { + return "", err + } + + return cacheDir, nil + } + + return os.MkdirTemp("", "-pydabs") +} + +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, 
"diagnostics.json") + + args := []string{ + pythonPath, + "-m", + "databricks.bundles.build", + "--phase", + string(m.phase), + "--input", + inputPath, + "--output", + outputPath, + "--diagnostics", + diagnosticsPath, + } + + if err := writeInputFile(inputPath, root); err != nil { + return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err) + } + + stderrWriter := newLogWriter(ctx, "stderr: ") + stdoutWriter := newLogWriter(ctx, "stdout: ") + + _, processErr := process.Background( + ctx, + args, + process.WithDir(rootPath), + process.WithStderrWriter(stderrWriter), + process.WithStdoutWriter(stdoutWriter), + ) + if processErr != nil { + logger.Debugf(ctx, "python mutator process failed: %s", processErr) + } + + pythonDiagnostics, pythonDiagnosticsErr := loadDiagnosticsFile(diagnosticsPath) + if pythonDiagnosticsErr != nil { + logger.Debugf(ctx, "failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + // if diagnostics file exists, it gives the most descriptive errors + // if there is any error, we treat it as fatal error, and stop processing + if pythonDiagnostics.HasError() { + return dyn.InvalidValue, pythonDiagnostics + } + + // process can fail without reporting errors in diagnostics file or creating it, for instance, + // venv doesn't have PyDABs library installed + if processErr != nil { + return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr) + } + + // or we can fail to read diagnostics file, that should always be created + if pythonDiagnosticsErr != nil { + return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + output, err := loadOutputFile(rootPath, outputPath) + if err != nil { + return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err) + } + + // we pass through pythonDiagnostic because it contains warnings + return output, pythonDiagnostics +} + +func writeInputFile(inputPath string, input dyn.Value) error { + // we need to marshal dyn.Value instead of bundle.Config to JSON to support + // non-string fields assigned with bundle variables + rootConfigJson, err := json.Marshal(input.AsAny()) + if err != nil { + return fmt.Errorf("failed to marshal input: %w", err) + } + + return os.WriteFile(inputPath, rootConfigJson, 0600) +} + +func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) { + outputFile, err := os.Open(outputPath) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err) + } + + defer outputFile.Close() + + // we need absolute path because later parts of pipeline assume all paths are absolute + // and this file will be used as location to resolve relative paths. 
+ // + // virtualPath has to stay in rootPath, because locations outside root path are not allowed: + // + // Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path + // + // for that, we pass virtualPath instead of outputPath as the file location + virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err) + } + + generated, err := yamlloader.LoadYAML(virtualPath, outputFile) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err) + } + + normalized, diagnostic := convert.Normalize(config.Root{}, generated) + if diagnostic.Error() != nil { + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error()) + } + + // warnings shouldn't happen because the output should already be normalized + // if one does, it's a bug in the mutator and should be treated as an error + + for _, d := range diagnostic.Filter(diag.Warning) { + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary) + } + + return normalized, nil +} + +// loadDiagnosticsFile loads diagnostics from a file. +// +// It contains a list of warnings and errors that we should print to users. +// +// If the file doesn't exist, we return an error. We expect the file to always be +// created by the Python mutator, and its absence means there are integration problems, +// and the diagnostics file was lost. If we treat non-existence as an empty diag.Diagnostics +// we risk losing errors and warnings. +func loadDiagnosticsFile(path string) (diag.Diagnostics, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open diagnostics file: %w", err) + } + + defer file.Close() + + return parsePythonDiagnostics(file) +} + +func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { + switch phase { + case PythonMutatorPhaseLoad: + return createLoadOverrideVisitor(ctx), nil + case PythonMutatorPhaseInit: + return createInitOverrideVisitor(ctx), nil + default: + return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) + } +} + +// createLoadOverrideVisitor creates an override visitor for the load phase. +// +// During load, it's only possible to create new resources, and not modify or +// delete existing ones.
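// For example, if the YAML configuration declares resources.jobs.job0, the
// load-phase output may insert resources.jobs.job1, while all of the
// following are rejected with an "unexpected change at ..." error:
//
//	updating resources.jobs.job0.name
//	deleting resources.jobs.job0
//	inserting resources.jobs.job0.description
//
// TestCreateOverrideVisitor below enumerates the full matrix.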
+func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + insertResource := len(valuePath) == len(jobsPath)+1 + + // adding a property into an existing resource is not allowed, because it changes it + if !insertResource { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + }, + } +} + +// createInitOverrideVisitor creates an override visitor for the init phase. +// +// During the init phase it's possible to create new resources, modify existing +// resources, but not delete existing resources. +func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + + if !valuePath.HasPrefix(jobsPath) { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + deleteResource := len(valuePath) == len(jobsPath)+1 + + if deleteResource { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + // deleting properties is allowed because it only changes an existing resource + log.Debugf(ctx, "Delete value at %q", valuePath.String()) + + return nil + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + } + + log.Debugf(ctx, "Update value at %q", valuePath.String()) + + return right, nil + }, + } +} + +func isOmitemptyDelete(left dyn.Value) bool { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. 
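// For example, if the YAML declares an empty mapping (say, "tags: {}" on a
// job, used here purely as an illustration) and the Python output omits it
// entirely, the deletion is undone and the empty mapping is kept as-is.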
+ + switch left.Kind() { + case dyn.KindMap: + return left.MustMap().Len() == 0 + + case dyn.KindSequence: + return len(left.MustSequence()) == 0 + + case dyn.KindNil: + // map/sequence can be nil, for instance, bad YAML like: `foo:` + return true + + default: + return false + } +} + +// interpreterPath returns platform-specific path to Python interpreter in the virtual environment. +func interpreterPath(venvPath string) string { + if runtime.GOOS == "windows" { + return filepath.Join(venvPath, "Scripts", "python3.exe") + } else { + return filepath.Join(venvPath, "bin", "python3") + } +} diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go new file mode 100644 index 000000000..9a0ed8c3a --- /dev/null +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -0,0 +1,625 @@ +package python + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/databricks/cli/libs/dyn/merge" + + "github.com/databricks/cli/bundle/env" + "github.com/stretchr/testify/require" + + "golang.org/x/exp/maps" + + "github.com/databricks/cli/libs/dyn" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/libs/process" +) + +func TestPythonMutator_Name_load(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseLoad) + + assert.Equal(t, "PythonMutator(load)", mutator.Name()) +} + +func TestPythonMutator_Name_init(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseInit) + + assert.Equal(t, "PythonMutator(init)", mutator.Name()) +} + +func TestPythonMutator_load(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0" + }, + "job1": { + name: "job_1" + }, + } + } + }`, + `{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`, + ) + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diags := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diags.Error()) + + assert.ElementsMatch(t, []string{"job0", "job1"}, maps.Keys(b.Config.Resources.Jobs)) + + if job0, ok := b.Config.Resources.Jobs["job0"]; ok { + assert.Equal(t, "job_0", job0.Name) + } + + if job1, ok := b.Config.Resources.Jobs["job1"]; ok { + assert.Equal(t, "job_1", job1.Name) + } + + assert.Equal(t, 1, len(diags)) + assert.Equal(t, "job doesn't have any tasks", diags[0].Summary) + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 10, + Column: 5, + }, diags[0].Location) +} + +func TestPythonMutator_load_disallowed(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: 
"job description" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") +} + +func TestPythonMutator_init(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "init", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: "my job" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseInit) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) + + assert.ElementsMatch(t, []string{"job0"}, maps.Keys(b.Config.Resources.Jobs)) + assert.Equal(t, "job_0", b.Config.Resources.Jobs["job0"].Name) + assert.Equal(t, "my job", b.Config.Resources.Jobs["job0"].Description) + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + // 'name' wasn't changed, so it keeps its location + name, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.name")) + require.NoError(t, err) + assert.Equal(t, "databricks.yml", name.Location().File) + + // 'description' was updated by PyDABs and has location of generated file until + // we implement source maps + description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description")) + require.NoError(t, err) + + expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml") + require.NoError(t, err) + assert.Equal(t, expectedVirtualPath, description.Location().File) + + return v, nil + }) + assert.NoError(t, err) +} + +func TestPythonMutator_badOutput(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "resources": { + "jobs": { + "job0": { + unknown_property: "my job" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property") +} + +func TestPythonMutator_disabled(t *testing.T) { + b := loadYaml("databricks.yml", ``) + + ctx := context.Background() + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) +} + +func TestPythonMutator_venvRequired(t *testing.T) { + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true`) + + ctx := context.Background() + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") +} + +func TestPythonMutator_venvNotFound(t *testing.T) { + expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path")) + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: bad_path`) + + mutator := PythonMutator(PythonMutatorPhaseInit) + diag := 
bundle.Apply(context.Background(), b, mutator) + + assert.EqualError(t, diag.Error(), expectedError) +} + +type createOverrideVisitorTestCase struct { + name string + updatePath dyn.Path + deletePath dyn.Path + insertPath dyn.Path + phase phase + updateError error + deleteError error + insertError error +} + +func TestCreateOverrideVisitor(t *testing.T) { + left := dyn.NewValue(42, dyn.Location{}) + right := dyn.NewValue(1337, dyn.Location{}) + + testCases := []createOverrideVisitorTestCase{ + { + name: "load: can't change an existing job", + phase: PythonMutatorPhaseLoad, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"), + }, + { + name: "load: can't delete an existing job", + phase: PythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "load: can insert 'resources'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "load: can insert 'resources.jobs'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "load: can insert a job", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "load: can't change include", + phase: PythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + { + name: "init: can change an existing job", + phase: PythonMutatorPhaseInit, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: nil, + insertError: nil, + updateError: nil, + }, + { + name: "init: can't delete an existing job", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "init: can insert 'resources'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "init: can insert 'resources.jobs'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "init: can insert a job", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "init: can't change include", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at 
\"include[0]\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + } + + for _, tc := range testCases { + visitor, err := createOverrideVisitor(context.Background(), tc.phase) + if err != nil { + t.Fatalf("create visitor failed: %v", err) + } + + if tc.updatePath != nil { + t.Run(tc.name+"-update", func(t *testing.T) { + out, err := visitor.VisitUpdate(tc.updatePath, left, right) + + if tc.updateError != nil { + assert.Equal(t, tc.updateError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + + if tc.deletePath != nil { + t.Run(tc.name+"-delete", func(t *testing.T) { + err := visitor.VisitDelete(tc.deletePath, left) + + if tc.deleteError != nil { + assert.Equal(t, tc.deleteError, err) + } else { + assert.NoError(t, err) + } + }) + } + + if tc.insertPath != nil { + t.Run(tc.name+"-insert", func(t *testing.T) { + out, err := visitor.VisitInsert(tc.insertPath, right) + + if tc.insertError != nil { + assert.Equal(t, tc.insertError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + } +} + +type overrideVisitorOmitemptyTestCase struct { + name string + path dyn.Path + left dyn.Value + phases []phase + expectedErr error +} + +func TestCreateOverrideVisitor_omitempty(t *testing.T) { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. + + allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit} + location := dyn.Location{ + File: "databricks.yml", + Line: 10, + Column: 20, + } + + testCases := []overrideVisitorOmitemptyTestCase{ + { + // this is not happening, but adding for completeness + name: "undo delete of empty variables", + path: dyn.MustPathFromString("variables"), + left: dyn.NewValue([]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "undo delete of empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{dyn.NewValue("abc", location)}, location), + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue(map[string]dyn.Value{}, location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue( + map[string]dyn.Value{"dev": dyn.NewValue("true", location)}, + location, + ), + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of nil", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NilValue.WithLocation(location), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + } + + for _, tc := range testCases { + for _, phase := range tc.phases { + t.Run(tc.name+"-"+string(phase), func(t *testing.T) { + visitor, err := 
createOverrideVisitor(context.Background(), phase) + require.NoError(t, err) + + err = visitor.VisitDelete(tc.path, tc.left) + + assert.Equal(t, tc.expectedErr, err) + }) + } + } +} + +func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { + // this is an important behaviour, see loadDiagnosticsFile docstring + _, err := loadDiagnosticsFile("non_existent_file.json") + + assert.Error(t, err) +} + +func TestInterpreterPath(t *testing.T) { + if runtime.GOOS == "windows" { + assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) + } else { + assert.Equal(t, "venv/bin/python3", interpreterPath("venv")) + } +} + +func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + + t.Setenv(env.TempDirVariable, t.TempDir()) + + // after we override env variable, we always get the same cache dir as mutator + cacheDir, err := createCacheDir(ctx) + require.NoError(t, err) + + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") + + args = append(args, "--input", inputPath) + args = append(args, "--output", outputPath) + args = append(args, "--diagnostics", diagnosticsPath) + + stub.WithCallback(func(actual *exec.Cmd) error { + _, err := os.Stat(inputPath) + assert.NoError(t, err) + + if reflect.DeepEqual(actual.Args, args) { + err := os.WriteFile(outputPath, []byte(output), 0600) + require.NoError(t, err) + + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + require.NoError(t, err) + + return nil + } else { + return fmt.Errorf("unexpected command: %v", actual.Args) + } + }) + + return ctx +} + +func loadYaml(name string, content string) *bundle.Bundle { + v, diag := config.LoadFromBytes(name, []byte(content)) + + if diag.Error() != nil { + panic(diag.Error()) + } + + return &bundle.Bundle{ + Config: *v, + } +} + +func withFakeVEnv(t *testing.T, path string) { + interpreterPath := interpreterPath(path) + + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + + if err := os.Chdir(t.TempDir()); err != nil { + panic(err) + } + + err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + if err != nil { + panic(err) + } + + err = os.WriteFile(interpreterPath, []byte(""), 0755) + if err != nil { + panic(err) + } + + t.Cleanup(func() { + if err := os.Chdir(cwd); err != nil { + panic(err) + } + }) +} diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 60636bcc6..86a03b23e 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -34,7 +35,7 @@ func TestResolveClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -52,8 +53,8 @@ func TestResolveClusterReference(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value) - require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value) + require.Equal(t, 
"1234-5678-abcd", b.Config.Variables["my-cluster-id-1"].Value) + require.Equal(t, "9876-5432-xywz", b.Config.Variables["my-cluster-id-2"].Value) } func TestResolveNonExistentClusterReference(t *testing.T) { @@ -68,7 +69,7 @@ func TestResolveNonExistentClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -104,7 +105,7 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) - require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value) + require.Equal(t, "random value", b.Config.Variables["my-cluster-id"].Value) } func TestResolveServicePrincipal(t *testing.T) { @@ -131,14 +132,11 @@ func TestResolveServicePrincipal(t *testing.T) { diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) require.NoError(t, diags.Error()) - require.Equal(t, "app-1234", *b.Config.Variables["my-sp"].Value) + require.Equal(t, "app-1234", b.Config.Variables["my-sp"].Value) } func TestResolveVariableReferencesInVariableLookups(t *testing.T) { - s := func(s string) *string { - return &s - } - + s := "bar" b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ @@ -146,7 +144,7 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) { }, Variables: map[string]*variable.Variable{ "foo": { - Value: s("bar"), + Value: s, }, "lookup": { Lookup: &variable.Lookup{ @@ -167,7 +165,7 @@ func TestResolveVariableReferencesInVariableLookups(t *testing.T) { diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) require.NoError(t, diags.Error()) require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["lookup"].Value) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) } func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { @@ -194,3 +192,30 @@ func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables") } + +func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "dev", + }, + Variables: map[string]*variable.Variable{ + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${bundle.target}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + ctx = env.Set(ctx, "BUNDLE_VAR_lookup", "1234-5678-abcd") + + diags := bundle.Apply(ctx, b, bundle.Seq(SetVariables(), ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.NoError(t, diags.Error()) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) +} diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go index f7fce6c82..61940be56 100644 --- a/bundle/config/mutator/resolve_variable_references.go +++ b/bundle/config/mutator/resolve_variable_references.go @@ -17,6 +17,7 @@ type resolveVariableReferences struct { prefixes []string pattern dyn.Pattern lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) + skipFn func(dyn.Value) bool } func 
ResolveVariableReferences(prefixes ...string) bundle.Mutator { @@ -31,6 +32,18 @@ func ResolveVariableReferencesInLookup() bundle.Mutator { }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables} } +func ResolveVariableReferencesInComplexVariables() bundle.Mutator { + return &resolveVariableReferences{prefixes: []string{ + "bundle", + "workspace", + "variables", + }, + pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), + lookupFn: lookupForComplexVariables, + skipFn: skipResolvingInNonComplexVariables, + } +} + func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { // Future opportunity: if we lookup this path in both the given root // and the synthesized root, we know if it was explicitly set or implied to be empty. @@ -38,6 +51,38 @@ func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { return dyn.GetByPath(v, path) } +func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { + if path[0].Key() != "variables" { + return lookup(v, path) + } + + varV, err := dyn.GetByPath(v, path[:len(path)-1]) + if err != nil { + return dyn.InvalidValue, err + } + + var vv variable.Variable + err = convert.ToTyped(&vv, varV) + if err != nil { + return dyn.InvalidValue, err + } + + if vv.Type == variable.VariableTypeComplex { + return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables") + } + + return lookup(v, path) +} + +func skipResolvingInNonComplexVariables(v dyn.Value) bool { + switch v.Kind() { + case dyn.KindMap, dyn.KindSequence: + return false + default: + return true + } +} + func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { if path[0].Key() != "variables" { return lookup(v, path) @@ -100,17 +145,27 @@ func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) // Resolve variable references in all values. return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) { // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. - if path.HasPrefix(varPath) && len(path) == 2 { - path = dyn.NewPath( + if path.HasPrefix(varPath) { + newPath := dyn.NewPath( dyn.Key("variables"), path[1], dyn.Key("value"), ) + + if len(path) > 2 { + newPath = newPath.Append(path[2:]...) + } + + path = newPath } // Perform resolution only if the path starts with one of the specified prefixes. for _, prefix := range prefixes { if path.HasPrefix(prefix) { + // Skip resolution if there is a skip function and it returns true. 
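+ // For ResolveVariableReferencesInComplexVariables this is
+ // skipResolvingInNonComplexVariables, which leaves scalar values for the
+ // regular pass and resolves only maps and sequences here.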
+ if m.skipFn != nil && m.skipFn(v) { + return dyn.InvalidValue, dynvar.ErrSkipResolution + } return m.lookupFn(normalized, path) } } diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go index 651ea3d2c..7bb6f11a0 100644 --- a/bundle/config/mutator/resolve_variable_references_test.go +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -43,10 +43,6 @@ func TestResolveVariableReferences(t *testing.T) { } func TestResolveVariableReferencesToBundleVariables(t *testing.T) { - s := func(s string) *string { - return &s - } - b := &bundle.Bundle{ Config: config.Root{ Bundle: config.Bundle{ @@ -57,7 +53,7 @@ func TestResolveVariableReferencesToBundleVariables(t *testing.T) { }, Variables: map[string]*variable.Variable{ "foo": { - Value: s("bar"), + Value: "bar", }, }, }, @@ -195,3 +191,246 @@ func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers) assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice) } + +func TestResolveComplexVariable(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers) +} + +func TestResolveComplexVariableReferencesToFields(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. 
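+ // Note: the typed ClusterSpec field cannot hold the "${var.cluster}" string,
+ // so the reference is injected at the dynamic layer via dyn.SetByPath.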
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) +} + +func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "spark_conf": "${var.spark_conf}", + }, + Type: variable.VariableTypeComplex, + }, + "spark_conf": { + Value: map[string]any{ + "spark.executor.memory": "4g", + "spark.executor.cores": "2", + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq(ResolveVariableReferencesInComplexVariables(), ResolveVariableReferences("bundle", "workspace", "variables"))) + require.ErrorContains(t, diags.Error(), "complex variables cannot contain references to another complex variables") +} + +func TestResolveComplexVariableWithVarReference(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "package_version": { + Value: "1.0.0", + }, + "cluster_libraries": { + Value: [](map[string]any){ + { + "pypi": map[string]string{ + "package": "cicd_template==${var.package_version}", + }, + }, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{}, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. 
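+ // Note: the complex value itself embeds "${var.package_version}", which the
+ // ResolveVariableReferencesInComplexVariables pass resolves before the
+ // complex variable is substituted into the task libraries.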
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq( + ResolveVariableReferencesInComplexVariables(), + ResolveVariableReferences("bundle", "workspace", "variables"), + )) + require.NoError(t, diags.Error()) + require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package) +} diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go index 710190230..85db79797 100644 --- a/bundle/config/mutator/rewrite_sync_paths.go +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -35,7 +35,7 @@ func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { dir := filepath.Dir(v.Location().File) rel, err := filepath.Rel(root, dir) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Location()), nil @@ -47,11 +47,11 @@ func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Dia return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath))) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return v, nil }) diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 4c611a18d..e4bdbaa1a 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -42,14 +42,26 @@ func reportRunAsNotSupported(resourceType string, location dyn.Location, current func validateRunAs(b *bundle.Bundle) diag.Diagnostics { diags := diag.Diagnostics{} - runAs := b.Config.RunAs - // Error if neither service_principal_name nor user_name are specified - if runAs.ServicePrincipalName == "" && runAs.UserName == "" { - diags = diags.Extend(diag.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as"))) + neitherSpecifiedErr := diag.Diagnostics{{ + Summary: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified", + Location: b.Config.GetLocation("run_as"), + Severity: diag.Error, + }} + + // Fail fast if neither service_principal_name nor user_name are specified, but the + // run_as section is present. + if b.Config.Value().Get("run_as").Kind() == dyn.KindNil { + return neitherSpecifiedErr + } + + // Fail fast if one or both of service_principal_name and user_name are specified, + // but with empty values. 
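+ // Both cases surface the same diagnostic, anchored at the run_as location.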
+ runAs := b.Config.RunAs + if runAs.ServicePrincipalName == "" && runAs.UserName == "" { + return neitherSpecifiedErr } - // Error if both service_principal_name and user_name are specified if runAs.UserName != "" && runAs.ServicePrincipalName != "" { diags = diags.Extend(diag.Diagnostics{{ Summary: "run_as section cannot specify both user_name and service_principal_name", @@ -151,8 +163,7 @@ func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) { func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Mutator is a no-op if run_as is not specified in the bundle - runAs := b.Config.RunAs - if runAs == nil { + if b.Config.Value().Get("run_as").Kind() == dyn.KindInvalid { return nil } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go index 236749550..5ad4b6933 100644 --- a/bundle/config/mutator/run_as_test.go +++ b/bundle/config/mutator/run_as_test.go @@ -18,7 +18,7 @@ import ( func allResourceTypes(t *testing.T) []string { // Compute supported resource types based on the `Resources{}` struct. - r := config.Resources{} + r := &config.Resources{} rv, err := convert.FromTyped(r, dyn.NilValue) require.NoError(t, err) normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields) @@ -154,6 +154,11 @@ func TestRunAsErrorForUnsupportedResources(t *testing.T) { v, err := convert.FromTyped(base, dyn.NilValue) require.NoError(t, err) + // Define top level resources key in the bundle configuration. + // This is not part of the typed configuration, so we need to add it manually. + v, err = dyn.Set(v, "resources", dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + for _, rt := range allResourceTypes(t) { // Skip allowed resources if slices.Contains(allowList, rt) { diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index eae1fe2ab..b3a9cf400 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -30,6 +30,10 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di // case: read and set variable value from process environment envVarName := bundleVarPrefix + name if val, ok := env.Lookup(ctx, envVarName); ok { + if v.IsComplex() { + return diag.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name) + } + err := v.Set(val) if err != nil { return diag.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) @@ -37,21 +41,21 @@ func setVariable(ctx context.Context, v *variable.Variable, name string) diag.Di return nil } - // case: Set the variable to its default value - if v.HasDefault() { - err := v.Set(*v.Default) - if err != nil { - return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, *v.Default, name, err) - } - return nil - } - // case: Defined a variable for named lookup for a resource // It will be resolved later in ResolveResourceReferences mutator if v.Lookup != nil { return nil } + // case: Set the variable to its default value + if v.HasDefault() { + err := v.Set(v.Default) + if err != nil { + return diag.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, v.Default, name, err) + } + return nil + } + // We should have had a value to set for the variable at this point. return diag.Errorf(`no value assigned to required variable %s. 
Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index ae4f79896..65dedee97 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -15,7 +15,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } // set value for variable as an environment variable @@ -23,19 +23,19 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "process-env") + assert.Equal(t, variable.Value, "process-env") } func TestSetVariableUsingDefaultValue(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "default") + assert.Equal(t, variable.Value, "default") } func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { @@ -43,15 +43,15 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // since a value is already assigned to the variable, it would not be overridden // by the default value diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "assigned-value") + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { @@ -59,8 +59,8 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // set value for variable as an environment variable @@ -70,7 +70,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // by the value from environment diags := setVariable(context.Background(), &variable, "foo") require.NoError(t, diags.Error()) - assert.Equal(t, *variable.Value, "assigned-value") + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { @@ -92,15 +92,15 @@ func TestSetVariablesMutator(t *testing.T) { Variables: map[string]*variable.Variable{ "a": { Description: "resolved to default value", - Default: &defaultValForA, + Default: defaultValForA, }, "b": { Description: "resolved from environment vairables", - Default: &defaultValForB, + Default: defaultValForB, }, "c": { Description: "has already been assigned a value", - Value: &valForC, + Value: valForC, }, }, }, @@ -110,7 +110,22 @@ func TestSetVariablesMutator(t *testing.T) { diags := bundle.Apply(context.Background(), b, SetVariables()) require.NoError(t, diags.Error()) - assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) - assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value) - assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value) + assert.Equal(t, "default-a", b.Config.Variables["a"].Value) + assert.Equal(t, "env-var-b", 
b.Config.Variables["b"].Value) + assert.Equal(t, "assigned-val-c", b.Config.Variables["c"].Value) +} + +func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) { + defaultVal := "default" + variable := variable.Variable{ + Description: "a test variable", + Default: defaultVal, + Type: variable.VariableTypeComplex, + } + + // set value for variable as an environment variable + t.Setenv("BUNDLE_VAR_foo", "process-env") + + diags := setVariable(context.Background(), &variable, "foo") + assert.ErrorContains(t, diags.Error(), "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo") } diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index 18a09dfd6..a01d3d6a7 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -4,8 +4,8 @@ import ( "context" "errors" "fmt" + "io/fs" "net/url" - "os" "path" "path/filepath" "strings" @@ -32,9 +32,7 @@ func (err ErrIsNotNotebook) Error() string { return fmt.Sprintf("file at %s is not a notebook", err.path) } -type translatePaths struct { - seen map[string]string -} +type translatePaths struct{} // TranslatePaths converts paths to local notebook files into paths in the workspace file system. func TranslatePaths() bundle.Mutator { @@ -47,6 +45,18 @@ func (m *translatePaths) Name() string { type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error) +// translateContext is a context for rewriting paths in a config. +// It is freshly instantiated on every mutator apply call. +// It provides access to the underlying bundle object such that +// it doesn't have to be passed around explicitly. +type translateContext struct { + b *bundle.Bundle + + // seen is a map of local paths to their corresponding remote paths. + // If a local path has already been successfully resolved, we do not need to resolve it again. + seen map[string]string +} + // rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function // // It takes these arguments: @@ -56,14 +66,13 @@ type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) ( // This logic is different between regular files or notebooks. // // The function returns an error if it is impossible to rewrite the given relative path. -func (m *translatePaths) rewritePath( +func (t *translateContext) rewritePath( dir string, - b *bundle.Bundle, p *string, fn rewriteFunc, ) error { // We assume absolute paths point to a location in the workspace - if path.IsAbs(filepath.ToSlash(*p)) { + if path.IsAbs(*p) { return nil } @@ -79,13 +88,14 @@ func (m *translatePaths) rewritePath( // Local path is relative to the directory the resource was defined in. localPath := filepath.Join(dir, filepath.FromSlash(*p)) - if interp, ok := m.seen[localPath]; ok { + if interp, ok := t.seen[localPath]; ok { *p = interp return nil } - // Remote path must be relative to the bundle root. - localRelPath, err := filepath.Rel(b.RootPath, localPath) + // Local path must be contained in the bundle root. + // If it isn't, it won't be synchronized into the workspace. + localRelPath, err := filepath.Rel(t.b.RootPath, localPath) if err != nil { return err } @@ -94,22 +104,22 @@ func (m *translatePaths) rewritePath( } // Prefix remote path with its remote root path. 
- remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) + remotePath := path.Join(t.b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. - interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath)) + interp, err := fn(*p, localPath, localRelPath, remotePath) if err != nil { return err } *p = interp - m.seen[localPath] = interp + t.seen[localPath] = interp return nil } -func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { +func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) } if err != nil { @@ -123,9 +133,9 @@ func translateNotebookPath(literal, localFullPath, localRelPath, remotePath stri return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil } -func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { +func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } if err != nil { @@ -137,8 +147,8 @@ func translateFilePath(literal, localFullPath, localRelPath, remotePath string) return remotePath, nil } -func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - info, err := os.Stat(localFullPath) +func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath)) if err != nil { return "", err } @@ -148,20 +158,20 @@ func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath str return remotePath, nil } -func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { return localRelPath, nil } -func translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { if !strings.HasPrefix(localRelPath, ".") { localRelPath = "." 
+ string(filepath.Separator) + localRelPath } return localRelPath, nil } -func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { +func (t *translateContext) rewriteValue(p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { out := v.MustString() - err := m.rewritePath(dir, b, &out, fn) + err := t.rewritePath(dir, &out, fn) if err != nil { if target := (&ErrIsNotebook{}); errors.As(err, target) { return dyn.InvalidValue, fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, p, target) @@ -175,15 +185,15 @@ func (m *translatePaths) rewriteValue(b *bundle.Bundle, p dyn.Path, v dyn.Value, return dyn.NewValue(out, v.Location()), nil } -func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { - nv, err := m.rewriteValue(b, p, v, fn, dir) +func (t *translateContext) rewriteRelativeTo(p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { + nv, err := t.rewriteValue(p, v, fn, dir) if err == nil { return nv, nil } // If we failed to rewrite the path, try to rewrite it relative to the fallback directory. if fallback != "" { - nv, nerr := m.rewriteValue(b, p, v, fn, fallback) + nv, nerr := t.rewriteValue(p, v, fn, fallback) if nerr == nil { // TODO: Emit a warning that this path should be rewritten. return nv, nil @@ -194,16 +204,19 @@ func (m *translatePaths) rewriteRelativeTo(b *bundle.Bundle, p dyn.Path, v dyn.V } func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { - m.seen = make(map[string]string) + t := &translateContext{ + b: b, + seen: make(map[string]string), + } err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { var err error - for _, fn := range []func(*bundle.Bundle, dyn.Value) (dyn.Value, error){ - m.applyJobTranslations, - m.applyPipelineTranslations, - m.applyArtifactTranslations, + for _, fn := range []func(dyn.Value) (dyn.Value, error){ + t.applyJobTranslations, + t.applyPipelineTranslations, + t.applyArtifactTranslations, } { - v, err = fn(b, v) + v, err = fn(v) if err != nil { return dyn.InvalidValue, err } diff --git a/bundle/config/mutator/translate_paths_artifacts.go b/bundle/config/mutator/translate_paths_artifacts.go index 7bda04eec..921c00c73 100644 --- a/bundle/config/mutator/translate_paths_artifacts.go +++ b/bundle/config/mutator/translate_paths_artifacts.go @@ -3,36 +3,42 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/dyn" ) -func (m *translatePaths) applyArtifactTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - var err error +type artifactRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} +func (t *translateContext) artifactRewritePatterns() []artifactRewritePattern { // Base pattern to match all artifacts. base := dyn.NewPattern( dyn.Key("artifacts"), dyn.AnyKey(), ) - for _, t := range []struct { - pattern dyn.Pattern - fn rewriteFunc - }{ + // Compile list of configuration paths to rewrite. 
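+ // Artifact paths stay local (translateNoOp); they are only normalized
+ // relative to the file that defines the artifact.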
+ return []artifactRewritePattern{ { base.Append(dyn.Key("path")), - translateNoOp, + t.translateNoOp, }, - } { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + } +} + +func (t *translateContext) applyArtifactTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + for _, rewritePattern := range t.artifactRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[1].Key() dir, err := v.Location().Directory() if err != nil { return dyn.InvalidValue, fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, "") + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, "") }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index 58b5e0fb0..60cc8bb9a 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -4,7 +4,6 @@ import ( "fmt" "slices" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" "github.com/databricks/cli/libs/dyn" ) @@ -19,55 +18,42 @@ func noSkipRewrite(string) bool { return false } -func rewritePatterns(base dyn.Pattern) []jobRewritePattern { +func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern { return []jobRewritePattern{ { base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), - translateNotebookPath, + t.translateNotebookPath, noSkipRewrite, }, { base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), - translateFilePath, + t.translateFilePath, noSkipRewrite, }, { base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), - translateDirectoryPath, + t.translateDirectoryPath, noSkipRewrite, }, { base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), - translateFilePath, + t.translateFilePath, noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), - translateNoOp, + t.translateNoOp, noSkipRewrite, }, { base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), - translateNoOp, + t.translateNoOp, noSkipRewrite, }, } } -func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - fallback, err := gatherFallbackPaths(v, "jobs") - if err != nil { - return dyn.InvalidValue, err - } - - // Do not translate job task paths if using Git source - var ignore []string - for key, job := range b.Config.Resources.Jobs { - if job.GitSource != nil { - ignore = append(ignore, key) - } - } - +func (t *translateContext) jobRewritePatterns() []jobRewritePattern { // Base pattern to match all tasks in all jobs. base := dyn.NewPattern( dyn.Key("resources"), @@ -90,19 +76,38 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy dyn.Key("dependencies"), dyn.AnyIndex(), ), - translateNoOpWithPrefix, + t.translateNoOpWithPrefix, func(s string) bool { return !libraries.IsEnvironmentDependencyLocal(s) }, }, } - taskPatterns := rewritePatterns(base) - forEachPatterns := rewritePatterns(base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) + + taskPatterns := rewritePatterns(t, base) + forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) allPatterns := append(taskPatterns, jobEnvironmentsPatterns...) allPatterns = append(allPatterns, forEachPatterns...) 
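+ // forEachPatterns rebases the task patterns under for_each_task.task, so
+ // nested tasks receive the same path translation.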
+ return allPatterns +} - for _, t := range allPatterns { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { +func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "jobs") + if err != nil { + return dyn.InvalidValue, err + } + + // Do not translate job task paths if using Git source + var ignore []string + for key, job := range t.b.Config.Resources.Jobs { + if job.GitSource != nil { + ignore = append(ignore, key) + } + } + + for _, rewritePattern := range t.jobRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[2].Key() // Skip path translation if the job is using git source. @@ -116,10 +121,10 @@ func (m *translatePaths) applyJobTranslations(b *bundle.Bundle, v dyn.Value) (dy } sv := v.MustString() - if t.skipRewrite(sv) { + if rewritePattern.skipRewrite(sv) { return v, nil } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go index 5b2a2c346..71a65e846 100644 --- a/bundle/config/mutator/translate_paths_pipelines.go +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -3,16 +3,15 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/dyn" ) -func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value) (dyn.Value, error) { - fallback, err := gatherFallbackPaths(v, "pipelines") - if err != nil { - return dyn.InvalidValue, err - } +type pipelineRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} +func (t *translateContext) pipelineRewritePatterns() []pipelineRewritePattern { // Base pattern to match all libraries in all pipelines. base := dyn.NewPattern( dyn.Key("resources"), @@ -22,27 +21,36 @@ func (m *translatePaths) applyPipelineTranslations(b *bundle.Bundle, v dyn.Value dyn.AnyIndex(), ) - for _, t := range []struct { - pattern dyn.Pattern - fn rewriteFunc - }{ + // Compile list of configuration paths to rewrite. 
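+ // Notebook and file libraries go through the notebook detector, so a path
+ // of the wrong kind is reported as ErrIsNotebook or ErrIsNotNotebook.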
+ return []pipelineRewritePattern{ { base.Append(dyn.Key("notebook"), dyn.Key("path")), - translateNotebookPath, + t.translateNotebookPath, }, { base.Append(dyn.Key("file"), dyn.Key("path")), - translateFilePath, + t.translateFilePath, }, - } { - v, err = dyn.MapByPattern(v, t.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + } +} + +func (t *translateContext) applyPipelineTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "pipelines") + if err != nil { + return dyn.InvalidValue, err + } + + for _, rewritePattern := range t.pipelineRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { key := p[2].Key() dir, err := v.Location().Directory() if err != nil { return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) } - return m.rewriteRelativeTo(b, p, v, t.fn, dir, fallback[key]) + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) }) if err != nil { return dyn.InvalidValue, err diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 29afb9972..8476ee38a 100644 --- a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -37,7 +38,8 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -107,7 +109,8 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -274,7 +277,8 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -368,7 +372,8 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -401,7 +406,8 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -431,7 +437,8 @@ func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ @@ -461,7 +468,8 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: 
dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -491,7 +499,8 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ @@ -522,7 +531,8 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -556,7 +566,8 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -590,7 +601,8 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -624,7 +636,8 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Workspace: config.Workspace{ FilePath: "/bundle", @@ -659,7 +672,8 @@ func TestTranslatePathJobEnvironments(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "env2.py")) b := &bundle.Bundle{ - RootPath: dir, + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ Resources: config.Resources{ Jobs: map[string]*resources.Job{ diff --git a/bundle/config/root.go b/bundle/config/root.go index 88197c2b8..2bbb78696 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -74,6 +74,10 @@ func Load(path string) (*Root, diag.Diagnostics) { return nil, diag.FromErr(err) } + return LoadFromBytes(path, raw) +} + +func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) { r := Root{} // Load configuration tree from YAML. @@ -263,6 +267,11 @@ func (r *Root) InitializeVariables(vars []string) error { if _, ok := r.Variables[name]; !ok { return fmt.Errorf("variable %s has not been defined", name) } + + if r.Variables[name].IsComplex() { + return fmt.Errorf("setting variables of complex type via --var flag is not supported: %s", name) + } + err := r.Variables[name].Set(val) if err != nil { return fmt.Errorf("failed to assign %s to %s: %s", val, name, err) @@ -329,15 +338,38 @@ func (r *Root) MergeTargetOverrides(name string) error { "resources", "sync", "permissions", - "variables", } { if root, err = mergeField(root, target, f); err != nil { return err } } + // Merge `variables`. This field must be overwritten if set, not merged. + if v := target.Get("variables"); v.Kind() != dyn.KindInvalid { + _, err = dyn.Map(v, ".", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + varPath := dyn.MustPathFromString("variables").Append(p...) 
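+ // Only the "default" and "lookup" fields are copied from the target
+ // override, preserving "type" and "description" from the root definition.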
+ + vDefault := variable.Get("default") + if vDefault.Kind() != dyn.KindInvalid { + defaultPath := varPath.Append(dyn.Key("default")) + root, err = dyn.SetByPath(root, defaultPath, vDefault) + } + + vLookup := variable.Get("lookup") + if vLookup.Kind() != dyn.KindInvalid { + lookupPath := varPath.Append(dyn.Key("lookup")) + root, err = dyn.SetByPath(root, lookupPath, vLookup) + } + + return root, err + })) + if err != nil { + return err + } + } + // Merge `run_as`. This field must be overwritten if set, not merged. - if v := target.Get("run_as"); v != dyn.NilValue { + if v := target.Get("run_as"); v.Kind() != dyn.KindInvalid { root, err = dyn.Set(root, "run_as", v) if err != nil { return err @@ -345,7 +377,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Below, we're setting fields on the bundle key, so make sure it exists. - if root.Get("bundle") == dyn.NilValue { + if root.Get("bundle").Kind() == dyn.KindInvalid { root, err = dyn.Set(root, "bundle", dyn.NewValue(map[string]dyn.Value{}, dyn.Location{})) if err != nil { return err @@ -353,7 +385,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `mode`. This field must be overwritten if set, not merged. - if v := target.Get("mode"); v != dyn.NilValue { + if v := target.Get("mode"); v.Kind() != dyn.KindInvalid { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("mode")), v) if err != nil { return err @@ -361,7 +393,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `compute_id`. This field must be overwritten if set, not merged. - if v := target.Get("compute_id"); v != dyn.NilValue { + if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid { root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) if err != nil { return err @@ -369,7 +401,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // Merge `git`. - if v := target.Get("git"); v != dyn.NilValue { + if v := target.Get("git"); v.Kind() != dyn.KindInvalid { ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) if err != nil { ref = dyn.NewValue(map[string]dyn.Value{}, dyn.Location{}) @@ -382,7 +414,7 @@ func (r *Root) MergeTargetOverrides(name string) error { } // If the branch was overridden, we need to clear the inferred flag. - if branch := v.Get("branch"); branch != dyn.NilValue { + if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid { out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.NewValue(false, dyn.Location{})) if err != nil { return err @@ -410,12 +442,12 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) { // For each target, rewrite the variables block. return dyn.Map(v, "targets", dyn.Foreach(func(_ dyn.Path, target dyn.Value) (dyn.Value, error) { // Confirm it has a variables block. - if target.Get("variables") == dyn.NilValue { + if target.Get("variables").Kind() == dyn.KindInvalid { return target, nil } // For each variable, normalize its contents if it is a single string. 
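// For illustration (a sketch of the effect, not part of the patch), the
// shorthand normalization below rewrites a target override like:
//
//	targets:
//	  dev:
//	    variables:
//	      foo: "bar"          # scalar shorthand
//	      cfg: {key: value}   # map shorthand; the root declares cfg with type: complex
//
// as if the user had written the full form:
//
//	targets:
//	  dev:
//	    variables:
//	      foo:
//	        default: "bar"
//	      cfg:
//	        type: complex
//	        default: {key: value}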
-	return dyn.Map(target, "variables", dyn.Foreach(func(_ dyn.Path, variable dyn.Value) (dyn.Value, error) {
+	return dyn.Map(target, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) {
 		switch variable.Kind() {
 		case dyn.KindString, dyn.KindBool, dyn.KindFloat, dyn.KindInt:
@@ -426,6 +458,22 @@ func rewriteShorthands(v dyn.Value) (dyn.Value, error) {
 				"default": variable,
 			}, variable.Location()), nil

+		case dyn.KindMap, dyn.KindSequence:
+			// Check whether the original definition of the variable has a type field.
+			typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type")))
+			if err != nil {
+				return variable, nil
+			}
+
+			if typeV.MustString() == "complex" {
+				return dyn.NewValue(map[string]dyn.Value{
+					"type": typeV,
+					"default": variable,
+				}, variable.Location()), nil
+			}
+
+			return variable, nil
+
 		default:
 			return variable, nil
 		}
@@ -440,15 +488,19 @@ func validateVariableOverrides(root, target dyn.Value) (err error) {
 	var tv map[string]variable.Variable

 	// Collect variables from the root.
-	err = convert.ToTyped(&rv, root.Get("variables"))
-	if err != nil {
-		return fmt.Errorf("unable to collect variables from root: %w", err)
+	if v := root.Get("variables"); v.Kind() != dyn.KindInvalid {
+		err = convert.ToTyped(&rv, v)
+		if err != nil {
+			return fmt.Errorf("unable to collect variables from root: %w", err)
+		}
 	}

 	// Collect variables from the target.
-	err = convert.ToTyped(&tv, target.Get("variables"))
-	if err != nil {
-		return fmt.Errorf("unable to collect variables from target: %w", err)
+	if v := target.Get("variables"); v.Kind() != dyn.KindInvalid {
+		err = convert.ToTyped(&tv, v)
+		if err != nil {
+			return fmt.Errorf("unable to collect variables from target: %w", err)
+		}
 	}

 	// Check that all variables in the target exist in the root.
@@ -471,3 +523,9 @@ func (r Root) GetLocation(path string) dyn.Location {
 	}
 	return v.Location()
 }
+
+// Value returns the dynamic configuration value of the root object. This value
+// is the source of truth and is kept in sync with values in the typed configuration.
+func (r Root) Value() dyn.Value { + return r.value +} diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index b56768848..aed670d6c 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -51,7 +51,7 @@ func TestInitializeVariables(t *testing.T) { root := &Root{ Variables: map[string]*variable.Variable{ "foo": { - Default: &fooDefault, + Default: fooDefault, Description: "an optional variable since default is defined", }, "bar": { @@ -62,8 +62,8 @@ func TestInitializeVariables(t *testing.T) { err := root.InitializeVariables([]string{"foo=123", "bar=456"}) assert.NoError(t, err) - assert.Equal(t, "123", *(root.Variables["foo"].Value)) - assert.Equal(t, "456", *(root.Variables["bar"].Value)) + assert.Equal(t, "123", (root.Variables["foo"].Value)) + assert.Equal(t, "456", (root.Variables["bar"].Value)) } func TestInitializeVariablesWithAnEqualSignInValue(t *testing.T) { @@ -77,7 +77,7 @@ func TestInitializeVariablesWithAnEqualSignInValue(t *testing.T) { err := root.InitializeVariables([]string{"foo=123=567"}) assert.NoError(t, err) - assert.Equal(t, "123=567", *(root.Variables["foo"].Value)) + assert.Equal(t, "123=567", (root.Variables["foo"].Value)) } func TestInitializeVariablesInvalidFormat(t *testing.T) { @@ -119,3 +119,69 @@ func TestRootMergeTargetOverridesWithMode(t *testing.T) { require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } + +func TestInitializeComplexVariablesViaFlagIsNotAllowed(t *testing.T) { + root := &Root{ + Variables: map[string]*variable.Variable{ + "foo": { + Type: variable.VariableTypeComplex, + }, + }, + } + + err := root.InitializeVariables([]string{"foo=123"}) + assert.ErrorContains(t, err, "setting variables of complex type via --var flag is not supported: foo") +} + +func TestRootMergeTargetOverridesWithVariables(t *testing.T) { + root := &Root{ + Bundle: Bundle{}, + Variables: map[string]*variable.Variable{ + "foo": { + Default: "foo", + Description: "foo var", + }, + "foo2": { + Default: "foo2", + Description: "foo2 var", + }, + "complex": { + Type: variable.VariableTypeComplex, + Description: "complex var", + Default: map[string]interface{}{ + "key": "value", + }, + }, + }, + Targets: map[string]*Target{ + "development": { + Variables: map[string]*variable.Variable{ + "foo": { + Default: "bar", + Description: "wrong", + }, + "complex": { + Type: "wrong", + Description: "wrong", + Default: map[string]interface{}{ + "key1": "value1", + }, + }, + }, + }, + }, + } + root.initializeDynamicValue() + require.NoError(t, root.MergeTargetOverrides("development")) + assert.Equal(t, "bar", root.Variables["foo"].Default) + assert.Equal(t, "foo var", root.Variables["foo"].Description) + + assert.Equal(t, "foo2", root.Variables["foo2"].Default) + assert.Equal(t, "foo2 var", root.Variables["foo2"].Description) + + assert.Equal(t, map[string]interface{}{ + "key1": "value1", + }, root.Variables["complex"].Default) + assert.Equal(t, "complex var", root.Variables["complex"].Description) + +} diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go index 832efede9..a04c10776 100644 --- a/bundle/config/validate/validate_sync_patterns.go +++ b/bundle/config/validate/validate_sync_patterns.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/vfs" "golang.org/x/sync/errgroup" ) @@ -51,7 +50,7 @@ func 
checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (di
 		index := i
 		p := pattern
 		errs.Go(func() error {
-			fs, err := fileset.NewGlobSet(vfs.MustNew(rb.RootPath()), []string{p})
+			fs, err := fileset.NewGlobSet(rb.BundleRoot(), []string{p})
 			if err != nil {
 				return err
 			}
diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go
index 5e700a9b0..ba94f9c8a 100644
--- a/bundle/config/variable/variable.go
+++ b/bundle/config/variable/variable.go
@@ -2,12 +2,27 @@ package variable

 import (
 	"fmt"
+	"reflect"
+)
+
+// We use `any` because, since the introduction of complex variables,
+// variables can be of any type.
+// A type alias is used to make the code easier to understand.
+type VariableValue = any
+
+type VariableType string
+
+const (
+	VariableTypeComplex VariableType = "complex"
 )

 // An input variable for the bundle config
 type Variable struct {
+	// The type of the variable. This is used to validate the variable's value.
+	Type VariableType `json:"type,omitempty"`
+
 	// A default value which then makes the variable optional
-	Default *string `json:"default,omitempty"`
+	Default VariableValue `json:"default,omitempty"`

 	// Documentation for this input variable
 	Description string `json:"description,omitempty"`
@@ -21,7 +36,7 @@ type Variable struct {
 	// 4. Default value defined in variable definition
 	// 5. Throw error, since if no default value is defined, then the variable
 	// is required
-	Value *string `json:"value,omitempty" bundle:"readonly"`
+	Value VariableValue `json:"value,omitempty" bundle:"readonly"`

 	// The value of this field will be used to lookup the resource by name
 	// And assign the value of the variable to ID of the resource found.
@@ -39,10 +54,24 @@ func (v *Variable) HasValue() bool {
 	return v.Value != nil
 }

-func (v *Variable) Set(val string) error {
+func (v *Variable) Set(val VariableValue) error {
 	if v.HasValue() {
-		return fmt.Errorf("variable has already been assigned value: %s", *v.Value)
+		return fmt.Errorf("variable has already been assigned value: %s", v.Value)
 	}
-	v.Value = &val
+
+	rv := reflect.ValueOf(val)
+	switch rv.Kind() {
+	case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
+		if v.Type != VariableTypeComplex {
+			return fmt.Errorf("variable type is not complex")
+		}
+	}
+
+	v.Value = val
+
 	return nil
 }
+
+func (v *Variable) IsComplex() bool {
+	return v.Type == VariableTypeComplex
+}
diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go
index 066368a6b..133971449 100644
--- a/bundle/deploy/files/delete.go
+++ b/bundle/deploy/files/delete.go
@@ -2,7 +2,9 @@ package files

 import (
 	"context"
+	"errors"
 	"fmt"
+	"io/fs"
 	"os"

 	"github.com/databricks/cli/bundle"
@@ -67,7 +69,7 @@ func deleteSnapshotFile(ctx context.Context, b *bundle.Bundle) error {
 		return err
 	}
 	err = os.Remove(sp)
-	if err != nil && !os.IsNotExist(err) {
+	if err != nil && !errors.Is(err, fs.ErrNotExist) {
 		return fmt.Errorf("failed to destroy sync snapshot file: %s", err)
 	}
 	return nil
diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go
index 8d6efdae3..a308668d3 100644
--- a/bundle/deploy/files/sync.go
+++ b/bundle/deploy/files/sync.go
@@ -6,7 +6,6 @@ import (

 	"github.com/databricks/cli/bundle"
 	"github.com/databricks/cli/libs/sync"
-	"github.com/databricks/cli/libs/vfs"
 )

 func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) {
@@ -29,7 +28,7 @@ func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOp
 	}

 	opts := &sync.SyncOptions{
-		LocalPath: 
vfs.MustNew(rb.RootPath()), + LocalPath: rb.BundleRoot(), RemotePath: rb.Config().Workspace.FilePath, Include: includes, Exclude: rb.Config().Sync.Exclude, diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index b0211c8b7..d8c4a607d 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -26,7 +26,7 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { return diag.FromErr(err) } - err = sync.RunOnce(ctx) + b.Files, err = sync.RunOnce(ctx) if err != nil { if errors.Is(err, fs.ErrPermission) { return permissions.ReportPermissionDenied(ctx, b, b.Config.Workspace.StatePath) diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go index ccff64fe7..97048811b 100644 --- a/bundle/deploy/state.go +++ b/bundle/deploy/state.go @@ -6,7 +6,6 @@ import ( "fmt" "io" "io/fs" - "os" "path/filepath" "time" @@ -59,8 +58,8 @@ type entry struct { info fs.FileInfo } -func newEntry(path string) *entry { - info, err := os.Stat(path) +func newEntry(root vfs.Path, path string) *entry { + info, err := root.Stat(path) if err != nil { return &entry{path, nil} } @@ -111,11 +110,10 @@ func FromSlice(files []fileset.File) (Filelist, error) { return f, nil } -func (f Filelist) ToSlice(basePath string) []fileset.File { +func (f Filelist) ToSlice(root vfs.Path) []fileset.File { var files []fileset.File - root := vfs.MustNew(basePath) for _, file := range f { - entry := newEntry(filepath.Join(basePath, file.LocalPath)) + entry := newEntry(root, filepath.ToSlash(file.LocalPath)) // Snapshots created with versions <= v0.220.0 use platform-specific // paths (i.e. with backslashes). Files returned by [libs/fileset] always diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go index 57b38ec6c..24ed9d360 100644 --- a/bundle/deploy/state_pull.go +++ b/bundle/deploy/state_pull.go @@ -85,7 +85,7 @@ func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostic } log.Infof(ctx, "Creating new snapshot") - snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.RootPath), opts) + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts) if err != nil { return diag.FromErr(err) } diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go index bcb88374f..38f0b4021 100644 --- a/bundle/deploy/state_pull_test.go +++ b/bundle/deploy/state_pull_test.go @@ -4,7 +4,9 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" + "io/fs" "os" "testing" @@ -15,6 +17,7 @@ import ( "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/iam" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" @@ -57,8 +60,10 @@ func testStatePull(t *testing.T, opts statePullOpts) { return f, nil }} + tmpDir := t.TempDir() b := &bundle.Bundle{ - RootPath: t.TempDir(), + RootPath: tmpDir, + BundleRoot: vfs.MustNew(tmpDir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", @@ -270,7 +275,7 @@ func TestStatePullNoState(t *testing.T) { require.NoError(t, err) _, err = os.Stat(statePath) - require.True(t, os.IsNotExist(err)) + require.True(t, errors.Is(err, fs.ErrNotExist)) } func TestStatePullOlderState(t *testing.T) { diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go index efa051ab6..5e1e54230 100644 --- a/bundle/deploy/state_test.go +++ b/bundle/deploy/state_test.go @@ -32,7 +32,8 @@ func TestFromSlice(t 
*testing.T) { func TestToSlice(t *testing.T) { tmpDir := t.TempDir() - fileset := fileset.New(vfs.MustNew(tmpDir)) + root := vfs.MustNew(tmpDir) + fileset := fileset.New(root) testutil.Touch(t, tmpDir, "test1.py") testutil.Touch(t, tmpDir, "test2.py") testutil.Touch(t, tmpDir, "test3.py") @@ -44,7 +45,7 @@ func TestToSlice(t *testing.T) { require.NoError(t, err) require.Len(t, f, 3) - s := f.ToSlice(tmpDir) + s := f.ToSlice(root) require.Len(t, s, 3) for _, file := range s { diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go index 885e47a7a..bfdb308c4 100644 --- a/bundle/deploy/state_update.go +++ b/bundle/deploy/state_update.go @@ -4,12 +4,13 @@ import ( "bytes" "context" "encoding/json" + "errors" "io" + "io/fs" "os" "time" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" @@ -38,19 +39,8 @@ func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnost state.CliVersion = build.GetInfo().Version state.Version = DeploymentStateVersion - // Get the current file list. - sync, err := files.GetSync(ctx, bundle.ReadOnly(b)) - if err != nil { - return diag.FromErr(err) - } - - files, err := sync.GetFileList(ctx) - if err != nil { - return diag.FromErr(err) - } - - // Update the state with the current file list. - fl, err := FromSlice(files) + // Update the state with the current list of synced files. + fl, err := FromSlice(b.Files) if err != nil { return diag.FromErr(err) } @@ -95,7 +85,7 @@ func load(ctx context.Context, b *bundle.Bundle) (*DeploymentState, error) { log.Infof(ctx, "Loading deployment state from %s", statePath) f, err := os.Open(statePath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { log.Infof(ctx, "No deployment state file found") return &DeploymentState{ Version: DeploymentStateVersion, diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go index dd8a1336e..ed72439d2 100644 --- a/bundle/deploy/state_update_test.go +++ b/bundle/deploy/state_update_test.go @@ -10,19 +10,23 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/internal/testutil" - databrickscfg "github.com/databricks/databricks-sdk-go/config" - "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/iam" - "github.com/databricks/databricks-sdk-go/service/workspace" - "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) -func TestStateUpdate(t *testing.T) { - s := &stateUpdate{} +func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle { + tmpDir := t.TempDir() - b := &bundle.Bundle{ - RootPath: t.TempDir(), + testutil.Touch(t, tmpDir, "test1.py") + testutil.TouchNotebook(t, tmpDir, "test2.py") + + files, err := fileset.New(vfs.MustNew(tmpDir)).All() + require.NoError(t, err) + + return &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ Bundle: config.Bundle{ Target: "default", @@ -37,22 +41,14 @@ func TestStateUpdate(t *testing.T) { }, }, }, + Files: files, } +} - testutil.Touch(t, b.RootPath, "test1.py") - testutil.Touch(t, b.RootPath, "test2.py") - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &databrickscfg.Config{ - Host: "https://test.com", - } - b.SetWorkpaceClient(m.WorkspaceClient) - - wsApi := 
m.GetMockWorkspaceAPI() - wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ - ObjectType: "DIRECTORY", - }, nil) +func TestStateUpdate(t *testing.T) { + s := &stateUpdate{} + b := setupBundleForStateUpdate(t) ctx := context.Background() diags := bundle.Apply(ctx, b, s) @@ -63,7 +59,15 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(1), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) diags = bundle.Apply(ctx, b, s) @@ -74,45 +78,22 @@ func TestStateUpdate(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(2), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) } func TestStateUpdateWithExistingState(t *testing.T) { s := &stateUpdate{} - b := &bundle.Bundle{ - RootPath: t.TempDir(), - Config: config.Root{ - Bundle: config.Bundle{ - Target: "default", - }, - Workspace: config.Workspace{ - StatePath: "/state", - FilePath: "/files", - CurrentUser: &config.User{ - User: &iam.User{ - UserName: "test-user", - }, - }, - }, - }, - } - - testutil.Touch(t, b.RootPath, "test1.py") - testutil.Touch(t, b.RootPath, "test2.py") - - m := mocks.NewMockWorkspaceClient(t) - m.WorkspaceClient.Config = &databrickscfg.Config{ - Host: "https://test.com", - } - b.SetWorkpaceClient(m.WorkspaceClient) - - wsApi := m.GetMockWorkspaceAPI() - wsApi.EXPECT().GetStatusByPath(mock.Anything, "/files").Return(&workspace.ObjectInfo{ - ObjectType: "DIRECTORY", - }, nil) - + b := setupBundleForStateUpdate(t) ctx := context.Background() // Create an existing state file. @@ -144,6 +125,14 @@ func TestStateUpdateWithExistingState(t *testing.T) { require.NoError(t, err) require.Equal(t, int64(11), state.Seq) - require.Len(t, state.Files, 3) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) require.Equal(t, build.GetInfo().Version, state.CliVersion) } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index e1f73be28..7ea448538 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -455,6 +455,24 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { var src = resources.ModelServingEndpoint{ CreateServingEndpoint: &serving.CreateServingEndpoint{ Name: "name", + + // Need to specify this to satisfy the equivalence test: + // The previous method of generation includes the "create" field + // because it is required (not marked as `omitempty`). + // The previous method used [json.Marshal] from the standard library + // and as such observed the `omitempty` tag. + // The new method leverages [dyn.Value] where any field that is not + // explicitly set is not part of the value. 
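// A minimal standard-library illustration of the `omitempty` distinction
// described above (the type and field names here are hypothetical):
//
//	type example struct {
//		Config string `json:"config"`          // no omitempty: always emitted
//		Name   string `json:"name,omitempty"`  // omitempty: dropped when zero
//	}
//
// json.Marshal(example{}) produces {"config":""}: the field without
// `omitempty` is emitted with its zero value, while the omitempty field is
// omitted from the output entirely.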
+ Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + }, }, Permissions: []resources.Permission{ { diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 69ae70ba6..d480242ce 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -2,7 +2,9 @@ package terraform import ( "context" + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -59,7 +61,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // If the execPath already exists, return it. execPath := filepath.Join(binDir, product.Terraform.BinaryName()) _, err = os.Stat(execPath) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } if err == nil { @@ -148,7 +150,7 @@ func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versio // If the path does not exist, we return early. _, err := os.Stat(envValue) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { log.Debugf(ctx, "%s at %s does not exist", envVarName, envValue) return "", nil } else { @@ -216,6 +218,23 @@ func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.B return nil } +func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error { + var products []string + + if experimental := b.Config.Experimental; experimental != nil { + if experimental.PyDABs.Enabled { + products = append(products, "databricks-pydabs/0.0.0") + } + } + + userAgentExtra := strings.Join(products, " ") + if userAgentExtra != "" { + environ["DATABRICKS_USER_AGENT_EXTRA"] = userAgentExtra + } + + return nil +} + func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tfConfig := b.Config.Bundle.Terraform if tfConfig == nil { @@ -260,6 +279,11 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnosti return diag.FromErr(err) } + err = setUserAgentExtraEnvVar(environ, b) + if err != nil { + return diag.FromErr(err) + } + // Configure environment variables for auth for Terraform to use. 
log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(environ), ", ")) err = tf.SetEnv(environ) diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 421e9be3f..aa9b2f77f 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -248,6 +248,27 @@ func TestSetProxyEnvVars(t *testing.T) { assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) } +func TestSetUserAgentExtraEnvVar(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Experimental: &config.Experimental{ + PyDABs: config.PyDABs{ + Enabled: true, + }, + }, + }, + } + + env := make(map[string]string, 0) + err := setUserAgentExtraEnvVar(env, b) + + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "DATABRICKS_USER_AGENT_EXTRA": "databricks-pydabs/0.0.0", + }, env) +} + func TestInheritEnvVars(t *testing.T) { env := map[string]string{} diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go index a65c9f257..650ffb890 100644 --- a/bundle/deploy/terraform/tfdyn/rename_keys.go +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -28,7 +28,7 @@ func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { p[0] = dyn.Key(newKey) acc, err = dyn.SetByPath(acc, p, v) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } return dyn.InvalidValue, dyn.ErrDrop } diff --git a/bundle/internal/tf/codegen/schema/schema.go b/bundle/internal/tf/codegen/schema/schema.go index 534da4a02..f94b94f04 100644 --- a/bundle/internal/tf/codegen/schema/schema.go +++ b/bundle/internal/tf/codegen/schema/schema.go @@ -2,6 +2,8 @@ package schema import ( "context" + "errors" + "io/fs" "os" "path/filepath" @@ -41,7 +43,7 @@ func Load(ctx context.Context) (*tfjson.ProviderSchema, error) { } // Generate schema file if it doesn't exist. - if _, err := os.Stat(s.ProviderSchemaFile); os.IsNotExist(err) { + if _, err := os.Stat(s.ProviderSchemaFile); errors.Is(err, fs.ErrNotExist) { err = s.Generate(ctx) if err != nil { return nil, err diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index f55b6c4f0..a99f15a40 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.46.0" +const ProviderVersion = "1.48.0" diff --git a/bundle/internal/tf/schema/config.go b/bundle/internal/tf/schema/config.go index d24d57339..a2de987ec 100644 --- a/bundle/internal/tf/schema/config.go +++ b/bundle/internal/tf/schema/config.go @@ -28,6 +28,7 @@ type Config struct { Profile string `json:"profile,omitempty"` RateLimit int `json:"rate_limit,omitempty"` RetryTimeoutSeconds int `json:"retry_timeout_seconds,omitempty"` + ServerlessComputeId string `json:"serverless_compute_id,omitempty"` SkipVerify bool `json:"skip_verify,omitempty"` Token string `json:"token,omitempty"` Username string `json:"username,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go new file mode 100644 index 000000000..14d5c169d --- /dev/null +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
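// Illustrative sketch (editor's example, not part of the generated file):
// instances of the struct below are attached to the configuration through the
// AwsUnityCatalogAssumeRolePolicy map added to DataSources later in this
// patch, keyed by an arbitrary resource name, e.g.:
//
//	ds := NewDataSources()
//	ds.AwsUnityCatalogAssumeRolePolicy["this"] = DataSourceAwsUnityCatalogAssumeRolePolicy{
//		AwsAccountId: "123456789012", // hypothetical values
//		ExternalId:   "0000",
//		RoleName:     "uc-access",
//	}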
+ +package schema + +type DataSourceAwsUnityCatalogAssumeRolePolicy struct { + AwsAccountId string `json:"aws_account_id"` + ExternalId string `json:"external_id"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + RoleName string `json:"role_name"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_external_location.go b/bundle/internal/tf/schema/data_source_external_location.go index 0fea6e529..a3e78cbd3 100644 --- a/bundle/internal/tf/schema/data_source_external_location.go +++ b/bundle/internal/tf/schema/data_source_external_location.go @@ -19,6 +19,7 @@ type DataSourceExternalLocationExternalLocationInfo struct { CreatedBy string `json:"created_by,omitempty"` CredentialId string `json:"credential_id,omitempty"` CredentialName string `json:"credential_name,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name,omitempty"` Owner string `json:"owner,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index d517bbe0f..727848ced 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -26,6 +26,7 @@ type DataSourceJobJobSettingsSettingsEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -500,6 +501,7 @@ type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -529,6 +531,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struc OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -824,6 +827,10 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSt Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -832,6 +839,7 @@ type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications str OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded 
[]DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1163,6 +1171,10 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1171,6 +1183,7 @@ type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1236,6 +1249,10 @@ type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart struct { Id string `json:"id"` } +type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1244,6 +1261,7 @@ type DataSourceJobJobSettingsSettingsWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go index c7045d445..bf58f2726 100644 --- a/bundle/internal/tf/schema/data_source_storage_credential.go +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -36,6 +36,7 @@ type DataSourceStorageCredentialStorageCredentialInfo struct { CreatedAt int `json:"created_at,omitempty"` CreatedBy string `json:"created_by,omitempty"` Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name,omitempty"` Owner string `json:"owner,omitempty"` diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index c32483db0..b68df2b40 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,105 +3,107 @@ package schema type DataSources struct { - AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` - 
AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` - AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` - AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` - Catalog map[string]any `json:"databricks_catalog,omitempty"` - Catalogs map[string]any `json:"databricks_catalogs,omitempty"` - Cluster map[string]any `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` - Clusters map[string]any `json:"databricks_clusters,omitempty"` - CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` - CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` - CurrentUser map[string]any `json:"databricks_current_user,omitempty"` - DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` - DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` - Directory map[string]any `json:"databricks_directory,omitempty"` - ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` - ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` - Group map[string]any `json:"databricks_group,omitempty"` - InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` - InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` - Job map[string]any `json:"databricks_job,omitempty"` - Jobs map[string]any `json:"databricks_jobs,omitempty"` - Metastore map[string]any `json:"databricks_metastore,omitempty"` - Metastores map[string]any `json:"databricks_metastores,omitempty"` - MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` - MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` - MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` - NodeType map[string]any `json:"databricks_node_type,omitempty"` - Notebook map[string]any `json:"databricks_notebook,omitempty"` - NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` - Pipelines map[string]any `json:"databricks_pipelines,omitempty"` - Schemas map[string]any `json:"databricks_schemas,omitempty"` - ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` - ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` - Share map[string]any `json:"databricks_share,omitempty"` - Shares map[string]any `json:"databricks_shares,omitempty"` - SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` - SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` - SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` - StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` - StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` - Table map[string]any `json:"databricks_table,omitempty"` - Tables map[string]any `json:"databricks_tables,omitempty"` - User map[string]any `json:"databricks_user,omitempty"` - Views map[string]any `json:"databricks_views,omitempty"` - Volumes map[string]any `json:"databricks_volumes,omitempty"` - Zones map[string]any `json:"databricks_zones,omitempty"` + AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` + AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` + AwsCrossaccountPolicy 
map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` + AwsUnityCatalogAssumeRolePolicy map[string]any `json:"databricks_aws_unity_catalog_assume_role_policy,omitempty"` + AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + Catalogs map[string]any `json:"databricks_catalogs,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Clusters map[string]any `json:"databricks_clusters,omitempty"` + CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` + CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` + CurrentUser map[string]any `json:"databricks_current_user,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + Jobs map[string]any `json:"databricks_jobs,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + Metastores map[string]any `json:"databricks_metastores,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + NodeType map[string]any `json:"databricks_node_type,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + Pipelines map[string]any `json:"databricks_pipelines,omitempty"` + Schemas map[string]any `json:"databricks_schemas,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + Shares map[string]any `json:"databricks_shares,omitempty"` + SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` + SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` + SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Tables map[string]any `json:"databricks_tables,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + Views map[string]any `json:"databricks_views,omitempty"` + Volumes map[string]any `json:"databricks_volumes,omitempty"` + Zones map[string]any `json:"databricks_zones,omitempty"` } func NewDataSources() *DataSources { return &DataSources{ - AwsAssumeRolePolicy: make(map[string]any), - AwsBucketPolicy: make(map[string]any), - AwsCrossaccountPolicy: 
make(map[string]any), - AwsUnityCatalogPolicy: make(map[string]any), - Catalog: make(map[string]any), - Catalogs: make(map[string]any), - Cluster: make(map[string]any), - ClusterPolicy: make(map[string]any), - Clusters: make(map[string]any), - CurrentConfig: make(map[string]any), - CurrentMetastore: make(map[string]any), - CurrentUser: make(map[string]any), - DbfsFile: make(map[string]any), - DbfsFilePaths: make(map[string]any), - Directory: make(map[string]any), - ExternalLocation: make(map[string]any), - ExternalLocations: make(map[string]any), - Group: make(map[string]any), - InstancePool: make(map[string]any), - InstanceProfiles: make(map[string]any), - Job: make(map[string]any), - Jobs: make(map[string]any), - Metastore: make(map[string]any), - Metastores: make(map[string]any), - MlflowExperiment: make(map[string]any), - MlflowModel: make(map[string]any), - MwsCredentials: make(map[string]any), - MwsWorkspaces: make(map[string]any), - NodeType: make(map[string]any), - Notebook: make(map[string]any), - NotebookPaths: make(map[string]any), - Pipelines: make(map[string]any), - Schemas: make(map[string]any), - ServicePrincipal: make(map[string]any), - ServicePrincipals: make(map[string]any), - Share: make(map[string]any), - Shares: make(map[string]any), - SparkVersion: make(map[string]any), - SqlWarehouse: make(map[string]any), - SqlWarehouses: make(map[string]any), - StorageCredential: make(map[string]any), - StorageCredentials: make(map[string]any), - Table: make(map[string]any), - Tables: make(map[string]any), - User: make(map[string]any), - Views: make(map[string]any), - Volumes: make(map[string]any), - Zones: make(map[string]any), + AwsAssumeRolePolicy: make(map[string]any), + AwsBucketPolicy: make(map[string]any), + AwsCrossaccountPolicy: make(map[string]any), + AwsUnityCatalogAssumeRolePolicy: make(map[string]any), + AwsUnityCatalogPolicy: make(map[string]any), + Catalog: make(map[string]any), + Catalogs: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + Clusters: make(map[string]any), + CurrentConfig: make(map[string]any), + CurrentMetastore: make(map[string]any), + CurrentUser: make(map[string]any), + DbfsFile: make(map[string]any), + DbfsFilePaths: make(map[string]any), + Directory: make(map[string]any), + ExternalLocation: make(map[string]any), + ExternalLocations: make(map[string]any), + Group: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfiles: make(map[string]any), + Job: make(map[string]any), + Jobs: make(map[string]any), + Metastore: make(map[string]any), + Metastores: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsWorkspaces: make(map[string]any), + NodeType: make(map[string]any), + Notebook: make(map[string]any), + NotebookPaths: make(map[string]any), + Pipelines: make(map[string]any), + Schemas: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipals: make(map[string]any), + Share: make(map[string]any), + Shares: make(map[string]any), + SparkVersion: make(map[string]any), + SqlWarehouse: make(map[string]any), + SqlWarehouses: make(map[string]any), + StorageCredential: make(map[string]any), + StorageCredentials: make(map[string]any), + Table: make(map[string]any), + Tables: make(map[string]any), + User: make(map[string]any), + Views: make(map[string]any), + Volumes: make(map[string]any), + Zones: make(map[string]any), } } diff --git 
a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 0950073e2..42b648b0f 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -26,6 +26,7 @@ type ResourceJobEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -573,6 +574,7 @@ type ResourceJobTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -602,6 +604,7 @@ type ResourceJobTaskForEachTaskTaskEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } @@ -943,6 +946,10 @@ type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -951,6 +958,7 @@ type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1329,6 +1337,10 @@ type ResourceJobTaskWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobTaskWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1337,6 +1349,7 @@ type ResourceJobTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -1378,6 +1391,11 @@ type ResourceJobTriggerFileArrival struct { WaitAfterLastChangeSeconds int 
`json:"wait_after_last_change_seconds,omitempty"` } +type ResourceJobTriggerPeriodic struct { + Interval int `json:"interval"` + Unit string `json:"unit"` +} + type ResourceJobTriggerTable struct { Condition string `json:"condition,omitempty"` MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` @@ -1395,6 +1413,7 @@ type ResourceJobTriggerTableUpdate struct { type ResourceJobTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` + Periodic *ResourceJobTriggerPeriodic `json:"periodic,omitempty"` Table *ResourceJobTriggerTable `json:"table,omitempty"` TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` } @@ -1411,6 +1430,10 @@ type ResourceJobWebhookNotificationsOnStart struct { Id string `json:"id"` } +type ResourceJobWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + type ResourceJobWebhookNotificationsOnSuccess struct { Id string `json:"id"` } @@ -1419,6 +1442,7 @@ type ResourceJobWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_workspaces.go b/bundle/internal/tf/schema/resource_mws_workspaces.go index 21d1ce428..6c053cb84 100644 --- a/bundle/internal/tf/schema/resource_mws_workspaces.go +++ b/bundle/internal/tf/schema/resource_mws_workspaces.go @@ -43,6 +43,7 @@ type ResourceMwsWorkspaces struct { CustomTags map[string]string `json:"custom_tags,omitempty"` CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` DeploymentName string `json:"deployment_name,omitempty"` + GcpWorkspaceSa string `json:"gcp_workspace_sa,omitempty"` Id string `json:"id,omitempty"` IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` Location string `json:"location,omitempty"` diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go index af8a348d3..de671eade 100644 --- a/bundle/internal/tf/schema/resource_online_table.go +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -19,8 +19,9 @@ type ResourceOnlineTableSpec struct { } type ResourceOnlineTable struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - Status []any `json:"status,omitempty"` - Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + TableServingUrl string `json:"table_serving_url,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index 3d4a501ea..b565a5c78 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -41,6 +41,7 @@ type ResourceStorageCredential struct { Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` SkipValidation bool 
`json:"skip_validation,omitempty"` + StorageCredentialId string `json:"storage_credential_id,omitempty"` AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_system_schema.go b/bundle/internal/tf/schema/resource_system_schema.go index 09a86103a..fe5b128d6 100644 --- a/bundle/internal/tf/schema/resource_system_schema.go +++ b/bundle/internal/tf/schema/resource_system_schema.go @@ -3,6 +3,7 @@ package schema type ResourceSystemSchema struct { + FullName string `json:"full_name,omitempty"` Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Schema string `json:"schema,omitempty"` diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index e4ca67740..39db3ea2f 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -21,7 +21,7 @@ type Root struct { const ProviderHost = "registry.terraform.io" const ProviderSource = "databricks/databricks" -const ProviderVersion = "1.46.0" +const ProviderVersion = "1.48.0" func NewRoot() *Root { return &Root{ diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go index 6d39630c8..60264f6ea 100644 --- a/bundle/permissions/filter.go +++ b/bundle/permissions/filter.go @@ -66,6 +66,11 @@ func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Di err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { rv, err := dyn.Get(v, "resources") if err != nil { + // If the resources key is not found, we can skip this mutator. + if dyn.IsNoSuchKeyError(err) { + return v, nil + } + return dyn.InvalidValue, err } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index 563657677..109667289 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -4,6 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" @@ -28,8 +29,13 @@ func Initialize() bundle.Mutator { mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences, + // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences. + // See what is expected in PythonMutatorPhaseInit doc + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), + mutator.ResolveVariableReferencesInComplexVariables(), mutator.ResolveVariableReferences( "bundle", "workspace", @@ -41,6 +47,10 @@ func Initialize() bundle.Mutator { mutator.ProcessTargetMode(), mutator.DefaultQueueing(), mutator.ExpandPipelineGlobPaths(), + + // Configure use of WSFS for reads if the CLI is running on Databricks. 
+ mutator.ConfigureWSFS(), + mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go new file mode 100644 index 000000000..37ea188f7 --- /dev/null +++ b/bundle/render/render_text_output.go @@ -0,0 +1,176 @@ +package render + +import ( + "fmt" + "io" + "path/filepath" + "strings" + "text/template" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/fatih/color" +) + +var renderFuncMap = template.FuncMap{ + "red": color.RedString, + "green": color.GreenString, + "blue": color.BlueString, + "yellow": color.YellowString, + "magenta": color.MagentaString, + "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) + }, +} + +const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const summaryTemplate = `{{- if .Name -}} +Name: {{ .Name | bold }} +{{- if .Target }} +Target: {{ .Target | bold }} +{{- end }} +{{- if or .User .Host .Path }} +Workspace: +{{- if .Host }} + Host: {{ .Host | bold }} +{{- end }} +{{- if .User }} + User: {{ .User | bold }} +{{- end }} +{{- if .Path }} + Path: {{ .Path | bold }} +{{- end }} +{{- end }} + +{{ end -}} + +{{ .Trailer }} +` + +func pluralize(n int, singular, plural string) string { + if n == 1 { + return fmt.Sprintf("%d %s", n, singular) + } + return fmt.Sprintf("%d %s", n, plural) +} + +func buildTrailer(diags diag.Diagnostics) string { + parts := []string{} + if errors := len(diags.Filter(diag.Error)); errors > 0 { + parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) + } + if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { + parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) + } + if len(parts) > 0 { + return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) + } else { + return color.GreenString("Validation OK!") + } +} + +func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + if b == nil { + return renderSummaryTemplate(out, &bundle.Bundle{}, diags) + } + + var currentUser = &iam.User{} + + if b.Config.Workspace.CurrentUser != nil { + if b.Config.Workspace.CurrentUser.User != nil { + currentUser = b.Config.Workspace.CurrentUser.User + } + } + + t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate)) + err := t.Execute(out, map[string]any{ + "Name": b.Config.Bundle.Name, + "Target": b.Config.Bundle.Target, + "User": currentUser.UserName, + "Path": b.Config.Workspace.RootPath, + "Host": b.Config.Workspace.Host, + "Trailer": buildTrailer(diags), + }) + + return err +} + +func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + errorT := 
template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate)) + warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate)) + + // Print errors and warnings. + for _, d := range diags { + var t *template.Template + // Only Error and Warning severities are expected here; any other value would leave t nil. + switch d.Severity { + case diag.Error: + t = errorT + case diag.Warning: + t = warningT + } + + // Make the file path relative to the bundle root. + if d.Location.File != "" { + out, err := filepath.Rel(b.RootPath, d.Location.File) + // If the path cannot be made relative, use it as-is. + if err == nil { + d.Location.File = out + } + } + + // Render the diagnostic with the appropriate template. + err := t.Execute(out, d) + if err != nil { + return fmt.Errorf("failed to render template: %w", err) + } + } + + return nil +} + +// RenderTextOutput renders the diagnostics in a human-readable format. +func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + err := renderDiagnostics(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render diagnostics: %w", err) + } + + err = renderSummaryTemplate(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render summary: %w", err) + } + + return nil +} diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go new file mode 100644 index 000000000..4ae86ded7 --- /dev/null +++ b/bundle/render/render_text_output_test.go @@ -0,0 +1,258 @@ +package render + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/require" +) + +type renderTestOutputTestCase struct { + name string + bundle *bundle.Bundle + diags diag.Diagnostics + expected string +} + +func TestRenderTextOutput(t *testing.T) { + loadingBundle := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + }, + } + + testCases := []renderTestOutputTestCase{ + { + name: "nil bundle and 1 error", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + expected: "Error: failed to load xxx\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 error", + bundle: loadingBundle, + diags: diag.Errorf("failed to load bundle"), + expected: "Error: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 warning", + bundle: loadingBundle, + diags: diag.Warningf("failed to load bundle"), + expected: "Warning: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 warning\n", + }, + { + name: "bundle during 'load' and 2 warnings", + bundle: loadingBundle, + diags: diag.Warningf("warning (1)").Extend(diag.Warningf("warning (2)")), + expected: "Warning: warning (1)\n" + + "\n" + + "Warning: warning (2)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 warnings\n", + }, + { + name: "bundle during 'load' and 2 errors, 1 warning with details", + bundle: loadingBundle, + diags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (1)", + Detail: "detail (1)", + Location: dyn.Location{ + File: "foo.py", + Line: 1, + Column: 1, + }, + }, +
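+ // The two diagnostics below reuse foo.py, so the expected output also exercises the "in file:line:column" rendering for a second error and a warning.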
diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (2)", + Detail: "detail (2)", + Location: dyn.Location{ + File: "foo.py", + Line: 2, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "warning (3)", + Detail: "detail (3)", + Location: dyn.Location{ + File: "foo.py", + Line: 3, + Column: 1, + }, + }, + }, + expected: "Error: error (1)\n" + + " in foo.py:1:1\n" + + "\n" + + "detail (1)\n" + + "\n" + + "Error: error (2)\n" + + " in foo.py:2:1\n" + + "\n" + + "detail (2)\n" + + "\n" + + "Warning: warning (3)\n" + + " in foo.py:3:1\n" + + "\n" + + "detail (3)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 errors and 1 warning\n", + }, + { + name: "bundle during 'init'", + bundle: &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + Workspace: config.Workspace{ + Host: "https://localhost/", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + RootPath: "/Users/test-user@databricks.com/.bundle/examples/test-target", + }, + }, + }, + diags: nil, + expected: "Name: test-bundle\n" + + "Target: test-target\n" + + "Workspace:\n" + + " Host: https://localhost/\n" + + " User: test-user\n" + + " Path: /Users/test-user@databricks.com/.bundle/examples/test-target\n" + + "\n" + + "Validation OK!\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := RenderTextOutput(writer, tc.bundle, tc.diags) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +type renderDiagnosticsTestCase struct { + name string + diags diag.Diagnostics + expected string +} + +func TestRenderDiagnostics(t *testing.T) { + bundle := &bundle.Bundle{} + + testCases := []renderDiagnosticsTestCase{ + { + name: "empty diagnostics", + diags: diag.Diagnostics{}, + expected: "", + }, + { + name: "error with short summary", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + expected: "Error: failed to load xxx\n\n", + }, + { + name: "error with source location", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + Detail: "'name' is required", + Location: dyn.Location{ + File: "foo.yaml", + Line: 1, + Column: 2, + }, + }, + }, + expected: "Error: failed to load xxx\n" + + " in foo.yaml:1:2\n\n" + + "'name' is required\n\n", + }, + { + name: "error with path", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Detail: "'name' is required", + Summary: "failed to load xxx", + Path: dyn.MustPathFromString("resources.jobs.xxx"), + }, + }, + expected: "Error: failed to load xxx\n" + + " at resources.jobs.xxx\n" + + "\n" + + "'name' is required\n\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderDiagnostics(writer, bundle, tc.diags) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +func TestRenderSummaryTemplate_nilBundle(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderSummaryTemplate(writer, nil, nil) + require.NoError(t, err) + + assert.Equal(t, "Validation OK!\n", writer.String()) +} diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index b6d0235aa..380be0545 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -79,6 +79,17 @@ 
"experimental": { "description": "", "properties": { + "pydabs": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "venv_path": { + "description": "" + } + } + }, "python_wheel_wrapper": { "description": "" }, @@ -236,6 +247,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -853,6 +870,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -1595,6 +1618,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1634,6 +1668,17 @@ "pause_status": { "description": "Whether this trigger is paused or not." }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." + }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, "table": { "description": "Old table trigger settings name. 
Deprecated in favor of `table_update`.", "properties": { @@ -1712,6 +1757,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1740,16 +1796,16 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, @@ -1915,7 +1971,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." @@ -2623,7 +2679,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." 
@@ -2710,6 +2766,156 @@ } } }, + "quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { @@ -3017,6 +3223,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. 
If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -3634,6 +3846,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -4376,6 +4594,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -4415,6 +4644,17 @@ "pause_status": { "description": "Whether this trigger is paused or not." }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." + }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, "table": { "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", "properties": { @@ -4493,6 +4733,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. 
A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -4521,16 +4772,16 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, @@ -4696,7 +4947,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." @@ -5404,7 +5655,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." 
@@ -5491,6 +5742,156 @@ } } }, + "quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index ea4fd1020..6d9df0cc7 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -20,7 +20,7 @@ func TestIntSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }` @@ -47,7 +47,7 @@ func TestBooleanSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }` @@ -123,7 +123,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -134,7 +134,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -145,7 +145,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -156,7 +156,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -167,7 +167,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -178,7 +178,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -189,7 +189,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -200,7 +200,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -214,7 +214,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -225,7 +225,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -236,7 +236,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -247,7 +247,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -258,7 +258,7 @@ func TestStructOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -326,7 +326,7 @@ func TestStructOfStructsSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -391,7 +391,7 @@ func TestStructOfMapsSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -481,7 +481,7 @@ func TestMapOfPrimitivesSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -518,7 +518,7 @@ func TestMapOfStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -556,7 +556,7 @@ func TestMapOfMapSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -661,7 +661,7 @@ func TestSliceOfMapSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -699,7 +699,7 @@ func TestSliceOfStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -757,7 +757,7 @@ func TestEmbeddedStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -797,7 +797,7 @@ func TestEmbeddedStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -892,7 +892,7 @@ func TestNonAnnotatedFieldsAreSkipped(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -934,7 +934,7 @@ func TestDashFieldsAreSkipped(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -987,7 +987,7 @@ func TestPointerInStructSchema(t *testing.T) { 
}, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1004,7 +1004,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1018,7 +1018,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1035,7 +1035,7 @@ func TestPointerInStructSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1106,7 +1106,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1129,7 +1129,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1157,7 +1157,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1180,7 +1180,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1210,7 +1210,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1236,7 +1236,7 @@ func TestGenericSchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1322,7 +1322,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1333,7 +1333,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1347,7 +1347,7 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1429,7 +1429,7 @@ func TestDocIngestionForObject(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1512,7 +1512,7 @@ func TestDocIngestionForSlice(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1524,7 +1524,7 @@ func TestDocIngestionForSlice(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1611,7 +1611,7 @@ func TestDocIngestionForMap(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1623,7 +1623,7 @@ func TestDocIngestionForMap(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1683,7 +1683,7 @@ func TestDocIngestionForTopLevelPrimitive(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] } @@ -1761,7 +1761,7 @@ func TestInterfaceGeneratesEmptySchema(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1810,7 +1810,7 @@ func TestBundleReadOnlytag(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, @@ -1870,7 +1870,7 @@ func TestBundleInternalTag(t *testing.T) { }, { "type": "string", - "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\\}" + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" } ] }, diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go new file mode 100644 index 000000000..1badea6df --- /dev/null +++ b/bundle/tests/complex_variables_test.go @@ -0,0 +1,70 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + 
"github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/require" +) + +func TestComplexVariables(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "default") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) + + require.Equal(t, "13.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, "some-policy-id", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) + require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) + + require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries)) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Jar: "/path/to/jar", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Egg: "/path/to/egg", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Whl: "/path/to/whl", + }) + + require.Equal(t, "task with spark version 13.2.x-scala2.11 and jar /path/to/jar", b.Config.Resources.Jobs["my_job"].Tasks[0].TaskKey) +} + +func TestComplexVariablesOverride(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "dev") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) + + require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + + // Making sure the variable is overriden and not merged / extended + // These properties are set in the default target but not set in override target + // So they should be empty + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) +} diff --git a/bundle/tests/override_sync_test.go b/bundle/tests/override_sync_test.go deleted file mode 100644 index 64f28e377..000000000 --- a/bundle/tests/override_sync_test.go +++ /dev/null @@ -1,41 +0,0 @@ -package config_tests - -import ( - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/stretchr/testify/assert" -) - -func TestOverrideSyncTarget(t *testing.T) { - var b *bundle.Bundle - - b = loadTarget(t, "./override_sync", "development") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), 
filepath.FromSlash("tests/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "staging") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "prod") - assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} - -func TestOverrideSyncTargetNoRootSync(t *testing.T) { - var b *bundle.Bundle - - b = loadTarget(t, "./override_sync_no_root", "development") - assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "staging") - assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "prod") - assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} diff --git a/bundle/tests/quality_monitor/databricks.yml b/bundle/tests/quality_monitor/databricks.yml index 3abcdfdda..6138b9357 100644 --- a/bundle/tests/quality_monitor/databricks.yml +++ b/bundle/tests/quality_monitor/databricks.yml @@ -1,19 +1,26 @@ +bundle: + name: quality_monitors + resources: quality_monitors: my_monitor: - table_name: "main.test.thing1" + table_name: "main.test.dev" assets_dir: "/Shared/provider-test/databricks_monitoring/main.test.thing1" - output_schema_name: "test" + output_schema_name: "main.dev" inference_log: granularities: ["1 day"] timestamp_col: "timestamp" prediction_col: "prediction" model_id_col: "model_id" problem_type: "PROBLEM_TYPE_REGRESSION" + schedule: + quartz_cron_expression: "0 0 12 * * ?" 
# every day at noon + timezone_id: UTC targets: development: mode: development + default: true resources: quality_monitors: my_monitor: @@ -24,14 +31,14 @@ targets: quality_monitors: my_monitor: table_name: "main.test.staging" - output_schema_name: "staging" + output_schema_name: "main.staging" production: resources: quality_monitors: my_monitor: table_name: "main.test.prod" - output_schema_name: "prod" + output_schema_name: "main.prod" inference_log: granularities: ["1 hour"] timestamp_col: "timestamp_prod" diff --git a/bundle/tests/quality_monitor_test.go b/bundle/tests/quality_monitor_test.go index d5db05196..9b91052f5 100644 --- a/bundle/tests/quality_monitor_test.go +++ b/bundle/tests/quality_monitor_test.go @@ -24,7 +24,7 @@ func TestMonitorTableNames(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.dev", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "test", p.OutputSchemaName) + assert.Equal(t, "main.dev", p.OutputSchemaName) assertExpectedMonitor(t, p) } @@ -36,7 +36,7 @@ func TestMonitorStaging(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.staging", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "staging", p.OutputSchemaName) + assert.Equal(t, "main.staging", p.OutputSchemaName) assertExpectedMonitor(t, p) } @@ -48,7 +48,7 @@ func TestMonitorProduction(t *testing.T) { p := b.Config.Resources.QualityMonitors["my_monitor"] assert.Equal(t, "main.test.prod", p.TableName) assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) - assert.Equal(t, "prod", p.OutputSchemaName) + assert.Equal(t, "main.prod", p.OutputSchemaName) inferenceLog := p.InferenceLog assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities) diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user/databricks.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml new file mode 100644 index 000000000..be18f60e8 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml new file mode 100644 index 000000000..33c48cb58 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + user_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml new file mode 100644 index 000000000..4b59dc918 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml @@ -0,0 +1,6 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" + user_name: "" diff --git 
a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/databricks.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml similarity index 100% rename from bundle/tests/run_as/not_allowed/neither_sp_nor_user_override/override.yml rename to bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 37eed2896..725eee7b4 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -198,27 +198,53 @@ func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) { } func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { - b := load(t, "./run_as/not_allowed/neither_sp_nor_user") + tcases := []struct { + name string + err string + }{ + { + name: "empty_run_as", + err: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified", + }, + { + name: "empty_sp", + err: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified", + }, + { + name: "empty_user", + err: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified", + }, + { + name: "empty_user_and_sp", + err: "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified", + }, + } - ctx := context.Background() - bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "my_service_principal", - }, - } - return nil - }) + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { - diags := bundle.Apply(ctx, b, mutator.SetRunAs()) - err := diags.Error() + bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) + b := load(t, bundlePath) - configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/databricks.yml") - assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. 
Neither service_principal_name nor user_name is specified at %s:4:8", configPath)) + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + assert.EqualError(t, err, tc.err) + }) + } } func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { - b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user_override", "development") + b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user/override", "development") ctx := context.Background() bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { @@ -233,8 +259,7 @@ func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { diags := bundle.Apply(ctx, b, mutator.SetRunAs()) err := diags.Error() - configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user_override/override.yml") - assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) + assert.EqualError(t, err, "run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified") } func TestLegacyRunAs(t *testing.T) { diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 924d6a4e1..8fb130409 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -4,14 +4,19 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/cmd/root" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/internal" - "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - _, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") - require.ErrorContains(t, err, "Available targets:") - require.ErrorContains(t, err, "development") - require.ErrorContains(t, err, "staging") + stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") + stdout := stdoutBytes.String() + + assert.Error(t, root.ErrAlreadyPrinted, err) + assert.Contains(t, stdout, "Available targets:") + assert.Contains(t, stdout, "development") + assert.Contains(t, stdout, "staging") } diff --git a/bundle/tests/sync/nil/databricks.yml b/bundle/tests/sync/nil/databricks.yml new file mode 100644 index 000000000..a8b4b901e --- /dev/null +++ b/bundle/tests/sync/nil/databricks.yml @@ -0,0 +1,19 @@ +bundle: + name: sync_nil + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + include: ~ + exclude: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/sync/nil_root/databricks.yml b/bundle/tests/sync/nil_root/databricks.yml new file mode 100644 index 000000000..44e6c48ea --- /dev/null +++ b/bundle/tests/sync/nil_root/databricks.yml @@ -0,0 +1,17 @@ +bundle: + name: sync_nil_root + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/override_sync/databricks.yml b/bundle/tests/sync/override/databricks.yml similarity index 93% rename from 
bundle/tests/override_sync/databricks.yml rename to bundle/tests/sync/override/databricks.yml index 1417b8644..8bb0e1def 100644 --- a/bundle/tests/override_sync/databricks.yml +++ b/bundle/tests/sync/override/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/override_sync_no_root/databricks.yml b/bundle/tests/sync/override_no_root/databricks.yml similarity index 90% rename from bundle/tests/override_sync_no_root/databricks.yml rename to bundle/tests/sync/override_no_root/databricks.yml index 109d8da1f..bd1bfe8e0 100644 --- a/bundle/tests/override_sync_no_root/databricks.yml +++ b/bundle/tests/sync/override_no_root/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override_no_root workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/sync_include_exclude_no_matches_test.go b/bundle/tests/sync_include_exclude_no_matches_test.go index 135e2faac..94cedbaa6 100644 --- a/bundle/tests/sync_include_exclude_no_matches_test.go +++ b/bundle/tests/sync_include_exclude_no_matches_test.go @@ -13,7 +13,7 @@ import ( ) func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { - b := loadTarget(t, "./override_sync", "development") + b := loadTarget(t, "./sync/override", "development") diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns()) require.Len(t, diags, 3) @@ -21,7 +21,7 @@ func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { require.Equal(t, diags[0].Severity, diag.Warning) require.Equal(t, diags[0].Summary, "Pattern dist does not match any files") - require.Equal(t, diags[0].Location.File, filepath.Join("override_sync", "databricks.yml")) + require.Equal(t, diags[0].Location.File, filepath.Join("sync", "override", "databricks.yml")) require.Equal(t, diags[0].Location.Line, 17) require.Equal(t, diags[0].Location.Column, 11) require.Equal(t, diags[0].Path.String(), "sync.exclude[0]") diff --git a/bundle/tests/sync_test.go b/bundle/tests/sync_test.go new file mode 100644 index 000000000..d08e889c3 --- /dev/null +++ b/bundle/tests/sync_test.go @@ -0,0 +1,65 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/stretchr/testify/assert" +) + +func TestSyncOverride(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "prod") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncOverrideNoRootSync(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override_no_root", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, 
b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "prod") + assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncNil(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} + +func TestSyncNilRoot(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil_root", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil_root", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} diff --git a/bundle/tests/variables/complex/databricks.yml b/bundle/tests/variables/complex/databricks.yml new file mode 100644 index 000000000..ca27f606d --- /dev/null +++ b/bundle/tests/variables/complex/databricks.yml @@ -0,0 +1,51 @@ +bundle: + name: complex-variables + +resources: + jobs: + my_job: + job_clusters: + - job_cluster_key: key + new_cluster: ${var.cluster} + tasks: + - task_key: test + job_cluster_key: key + libraries: ${variables.libraries.value} + task_key: "task with spark version ${var.cluster.spark_version} and jar ${var.libraries[0].jar}" + +variables: + node_type: + default: "Standard_DS3_v2" + cluster: + type: complex + description: "A cluster definition" + default: + spark_version: "13.2.x-scala2.11" + node_type_id: ${var.node_type} + num_workers: 2 + policy_id: "some-policy-id" + spark_conf: + spark.speculation: true + spark.databricks.delta.retentionDurationCheck.enabled: false + spark.random: true + libraries: + type: complex + description: "A libraries definition" + default: + - jar: "/path/to/jar" + - egg: "/path/to/egg" + - whl: "/path/to/whl" + + +targets: + default: + dev: + variables: + node_type: "Standard_DS3_v3" + cluster: + spark_version: "14.2.x-scala2.11" + node_type_id: ${var.node_type} + num_workers: 4 + spark_conf: + spark.speculation: false + spark.databricks.delta.retentionDurationCheck.enabled: false diff --git a/bundle/tests/variables/empty/databricks.yml b/bundle/tests/variables/empty/databricks.yml new file mode 100644 index 000000000..f90f6211c --- /dev/null +++ b/bundle/tests/variables/empty/databricks.yml @@ -0,0 +1,7 @@ +variables: + a: + description: empty variable + default: + +bundle: + name: empty${var.a} diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/bundle/tests/variables/env_overrides/databricks.yml index e8adb9566..560513bc3 100644 --- a/bundle/tests/variables/env_overrides/databricks.yml +++ b/bundle/tests/variables/env_overrides/databricks.yml @@ -8,14 +8,16 @@ variables: d: description: variable with lookup - lookup: - cluster: some-cluster + default: "" e: description: variable with lookup - lookup: - instance_pool: some-pool + default: "some-value" + f: + description: variable with lookup + lookup: + cluster_policy: wrong-cluster-policy bundle: name: test bundle @@ -49,4 +51,7 @@ targets: e: lookup: instance_pool: some-test-instance-pool + f: + lookup: + cluster_policy: some-test-cluster-policy b: prod-b diff --git 
a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index f51802684..51a23e5d5 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -6,7 +6,10 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -106,19 +109,40 @@ func TestVariablesWithoutDefinition(t *testing.T) { require.NoError(t, diags.Error()) require.True(t, b.Config.Variables["a"].HasValue()) require.True(t, b.Config.Variables["b"].HasValue()) - assert.Equal(t, "foo", *b.Config.Variables["a"].Value) - assert.Equal(t, "bar", *b.Config.Variables["b"].Value) + assert.Equal(t, "foo", b.Config.Variables["a"].Value) + assert.Equal(t, "bar", b.Config.Variables["b"].Value) } func TestVariablesWithTargetLookupOverrides(t *testing.T) { b := load(t, "./variables/env_overrides") + + mockWorkspaceClient := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(mockWorkspaceClient.WorkspaceClient) + instancePoolApi := mockWorkspaceClient.GetMockInstancePoolsAPI() + instancePoolApi.EXPECT().GetByInstancePoolName(mock.Anything, "some-test-instance-pool").Return(&compute.InstancePoolAndStats{ + InstancePoolId: "1234", + }, nil) + + clustersApi := mockWorkspaceClient.GetMockClustersAPI() + clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{ + ClusterId: "4321", + }, nil) + + clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI() + clusterPoliciesApi.EXPECT().GetByName(mock.Anything, "some-test-cluster-policy").Return(&compute.Policy{ + PolicyId: "9876", + }, nil) + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-overrides-lookup"), mutator.SetVariables(), + mutator.ResolveResourceReferences(), )) + require.NoError(t, diags.Error()) - assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) - assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) + assert.Equal(t, "4321", b.Config.Variables["d"].Value) + assert.Equal(t, "1234", b.Config.Variables["e"].Value) + assert.Equal(t, "9876", b.Config.Variables["f"].Value) } func TestVariableTargetOverrides(t *testing.T) { @@ -169,3 +193,9 @@ func TestVariableTargetOverrides(t *testing.T) { }) } } + +func TestBundleWithEmptyVariableLoads(t *testing.T) { + b := load(t, "./variables/empty") + diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.ErrorContains(t, diags.Error(), "no value assigned to required variable a") +} diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 0a20b86b6..4280ae8c3 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -279,11 +279,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.StorageCredentials.List(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.StorageCredentials.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
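Aside: the TestSyncNil and TestSyncNilRoot fixtures above deliberately write `include: ~` (YAML null) rather than an empty list, and the tests assert nil slices for exactly that reason: a null leaves the destination slice nil, while `[]` produces a non-nil empty slice. The bundle loader has its own YAML machinery, but the nil-versus-empty distinction mirrors stock Go YAML behavior; a standalone check, not part of this PR, using gopkg.in/yaml.v3:

package main

import (
    "fmt"

    "gopkg.in/yaml.v3"
)

type syncSection struct {
    Include []string `yaml:"include"`
    Exclude []string `yaml:"exclude"`
}

func main() {
    var null, empty syncSection
    _ = yaml.Unmarshal([]byte("include: ~\nexclude: ~"), &null)    // YAML null
    _ = yaml.Unmarshal([]byte("include: []\nexclude: []"), &empty) // empty sequences
    fmt.Println(null.Include == nil, empty.Include == nil)         // true false
}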
diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 61a6c1f33..2fc8a314b 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -2,8 +2,9 @@ package auth import ( "context" + "errors" "fmt" - "os" + "io/fs" "sync" "time" @@ -95,7 +96,7 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata iniFile, err := profile.DefaultProfiler.Get(cmd.Context()) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { // return empty list for non-configured machines iniFile = &config.File{ File: &ini.File{}, diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index 6845ab672..c8c59c149 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -38,12 +38,10 @@ var nativeTemplates = []nativeTemplate{ { name: "default-sql", description: "The default SQL template for .sql files that run with Databricks SQL", - hidden: true, }, { name: "dbt-sql", - description: "The dbt SQL template (https://www.databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", - hidden: true, + description: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", }, { name: "mlops-stacks", diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go index aa8991596..475b2e149 100644 --- a/cmd/bundle/init_test.go +++ b/cmd/bundle/init_test.go @@ -30,6 +30,8 @@ func TestBundleInitRepoName(t *testing.T) { func TestNativeTemplateOptions(t *testing.T) { expected := []cmdio.Tuple{ {Name: "default-python", Id: "The default Python template for Notebooks / Delta Live Tables / Workflows"}, + {Name: "default-sql", Id: "The default SQL template for .sql files that run with Databricks SQL"}, + {Name: "dbt-sql", Id: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)"}, {Name: "mlops-stacks", Id: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)"}, {Name: "custom...", Id: "Bring your own template"}, } @@ -38,6 +40,8 @@ func TestNativeTemplateOptions(t *testing.T) { func TestNativeTemplateHelpDescriptions(t *testing.T) { expected := `- default-python: The default Python template for Notebooks / Delta Live Tables / Workflows +- default-sql: The default SQL template for .sql files that run with Databricks SQL +- dbt-sql: The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks) - mlops-stacks: The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)` assert.Equal(t, expected, nativeTemplateHelpDescriptions()) } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index 72ad8eb3a..df3e087c2 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -72,7 +72,8 @@ func newSyncCommand() *cobra.Command { return s.RunContinuous(ctx) } - return s.RunOnce(ctx) + _, err = s.RunOnce(ctx) + return err } return cmd diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go index d585c6220..ce3774cf5 100644 --- a/cmd/bundle/utils/utils.go +++ b/cmd/bundle/utils/utils.go @@ -20,19 +20,16 @@ func ConfigureBundleWithVariables(cmd *cobra.Command) (*bundle.Bundle, diag.Diag // Load bundle config and apply target b, diags := root.MustConfigureBundle(cmd) if diags.HasError() { - return nil, diags + return b, diags } variables, err := cmd.Flags().GetStringSlice("var") if err != nil { - return nil, diag.FromErr(err) + return b, diag.FromErr(err) } // Initialize variables by assigning them values passed as command line flags diags = 
diags.Extend(configureVariables(cmd, b, variables)) - if diags.HasError() { - return nil, diags - } return b, diags } diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index 8d49ec961..59a977047 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -3,123 +3,20 @@ package bundle import ( "encoding/json" "fmt" - "path/filepath" - "strings" - "text/template" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" - "github.com/fatih/color" "github.com/spf13/cobra" ) -var validateFuncMap = template.FuncMap{ - "red": color.RedString, - "green": color.GreenString, - "blue": color.BlueString, - "yellow": color.YellowString, - "magenta": color.MagentaString, - "cyan": color.CyanString, - "bold": func(format string, a ...interface{}) string { - return color.New(color.Bold).Sprintf(format, a...) - }, - "italic": func(format string, a ...interface{}) string { - return color.New(color.Italic).Sprintf(format, a...) - }, -} - -const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} - {{ "at " }}{{ .Path.String | green }} - {{ "in " }}{{ .Location.String | cyan }} - -` - -const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} - {{ "at " }}{{ .Path.String | green }} - {{ "in " }}{{ .Location.String | cyan }} - -` - -const summaryTemplate = `Name: {{ .Config.Bundle.Name | bold }} -Target: {{ .Config.Bundle.Target | bold }} -Workspace: - Host: {{ .WorkspaceClient.Config.Host | bold }} - User: {{ .Config.Workspace.CurrentUser.UserName | bold }} - Path: {{ .Config.Workspace.RootPath | bold }} - -{{ .Trailer }} -` - -func pluralize(n int, singular, plural string) string { - if n == 1 { - return fmt.Sprintf("%d %s", n, singular) - } - return fmt.Sprintf("%d %s", n, plural) -} - -func buildTrailer(diags diag.Diagnostics) string { - parts := []string{} - if errors := len(diags.Filter(diag.Error)); errors > 0 { - parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) - } - if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { - parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) - } - if len(parts) > 0 { - return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) - } else { - return color.GreenString("Validation OK!") - } -} - -func renderTextOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { - errorT := template.Must(template.New("error").Funcs(validateFuncMap).Parse(errorTemplate)) - warningT := template.Must(template.New("warning").Funcs(validateFuncMap).Parse(warningTemplate)) - - // Print errors and warnings. - for _, d := range diags { - var t *template.Template - switch d.Severity { - case diag.Error: - t = errorT - case diag.Warning: - t = warningT - } - - // Make file relative to bundle root - if d.Location.File != "" { - out, _ := filepath.Rel(b.RootPath, d.Location.File) - d.Location.File = out - } - - // Render the diagnostic with the appropriate template. - err := t.Execute(cmd.OutOrStdout(), d) - if err != nil { - return err - } - } - - // Print validation summary. 
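Aside: the renderTextOutput machinery being deleted here moves into the new bundle/render package; underneath it is an ordinary text/template whose FuncMap maps pipeline names onto fatih/color helpers. A minimal standalone sketch of that pattern, an illustration rather than the relocated code:

package main

import (
    "os"
    "text/template"

    "github.com/fatih/color"
)

func main() {
    funcs := template.FuncMap{
        "red":    color.RedString,
        "yellow": color.YellowString,
    }
    // A pipeline like {{ "Error" | red }} calls the mapped function with the
    // piped value as its argument.
    t := template.Must(template.New("diag").Funcs(funcs).Parse(
        "{{ \"Error\" | red }}: {{ .Summary }}\n"))
    _ = t.Execute(os.Stdout, map[string]string{"Summary": "something went wrong"})
}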
- t := template.Must(template.New("summary").Funcs(validateFuncMap).Parse(summaryTemplate)) - err := t.Execute(cmd.OutOrStdout(), map[string]any{ - "Config": b.Config, - "Trailer": buildTrailer(diags), - "WorkspaceClient": b.WorkspaceClient(), - }) - if err != nil { - return err - } - - return diags.Error() -} - func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { - buf, err := json.MarshalIndent(b.Config, "", " ") + buf, err := json.MarshalIndent(b.Config.Value().AsAny(), "", " ") if err != nil { return err } @@ -137,19 +34,35 @@ func newValidateCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() b, diags := utils.ConfigureBundleWithVariables(cmd) - if err := diags.Error(); err != nil { - return diags.Error() + + if b == nil { + if err := diags.Error(); err != nil { + return diags.Error() + } else { + return fmt.Errorf("invariant failed: returned bundle is nil") + } } - diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) - diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) - if err := diags.Error(); err != nil { - return err + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) + } + + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) } switch root.OutputType(cmd) { case flags.OutputText: - return renderTextOutput(cmd, b, diags) + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil case flags.OutputJSON: return renderJsonOutput(cmd, b, diags) default: diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 1e94ddae8..895a5902c 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -139,10 +139,11 @@ The host must be specified with the --host flag or the DATABRICKS_HOST environme // Save profile to config file. 
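Aside: a theme of this change is that ConfigureBundleWithVariables, configureBundle, and the validate RunE now hand back the bundle together with whatever diagnostics have accumulated, instead of returning nil on the first error; later phases run only while no error has been recorded, but the renderer always sees warnings and partial state. The control flow in miniature, with stand-in types rather than the real diag package:

package main

import "fmt"

type diagnostics []string

func (d diagnostics) hasError() bool { return len(d) > 0 }

func step(name string, fail bool) diagnostics {
    if fail {
        return diagnostics{name + " failed"}
    }
    return nil
}

func main() {
    // Accumulate diagnostics; skip later steps after an error, but keep
    // everything collected so far available for rendering.
    var diags diagnostics
    diags = append(diags, step("configure", false)...)
    if !diags.hasError() {
        diags = append(diags, step("initialize", true)...)
    }
    if !diags.hasError() {
        diags = append(diags, step("validate", false)...)
    }
    fmt.Println(diags) // [initialize failed]
}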
return databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: cfg.Profile, - Host: cfg.Host, - Token: cfg.Token, - ClusterID: cfg.ClusterID, + Profile: cfg.Profile, + Host: cfg.Host, + Token: cfg.Token, + ClusterID: cfg.ClusterID, + ConfigFile: cfg.ConfigFile, }) } diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index 259c83adb..a127fe57a 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -78,7 +78,8 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { //TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) - cfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + cfgPath := filepath.Join(tempHomeDir, "overwrite-databricks-cfg") t.Setenv("DATABRICKS_CONFIG_FILE", cfgPath) inp := getTempFileWithContent(t, tempHomeDir, "token\n") @@ -96,6 +97,13 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { _, err = os.Stat(cfgPath) assert.NoError(t, err) + _, err = os.Stat(defaultCfgPath) + if runtime.GOOS == "windows" { + assert.ErrorContains(t, err, "cannot find the file specified") + } else { + assert.ErrorContains(t, err, "no such file or directory") + } + cfg, err := ini.Load(cfgPath) assert.NoError(t, err) diff --git a/cmd/root/bundle.go b/cmd/root/bundle.go index 4ed89c57b..8b98f2cf2 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -76,15 +76,11 @@ func configureBundle(cmd *cobra.Command, b *bundle.Bundle) (*bundle.Bundle, diag ctx := cmd.Context() diags := bundle.Apply(ctx, b, m) if diags.HasError() { - return nil, diags + return b, diags } // Configure the workspace profile if the flag has been set. diags = diags.Extend(configureProfile(cmd, b)) - if diags.HasError() { - return nil, diags - } - return b, diags } diff --git a/cmd/root/root.go b/cmd/root/root.go index 38eb42ccb..61baa4da0 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -2,6 +2,7 @@ package root import ( "context" + "errors" "fmt" "os" "strings" @@ -97,7 +98,7 @@ func Execute(cmd *cobra.Command) { // Run the command cmd, err := cmd.ExecuteContextC(ctx) - if err != nil { + if err != nil && !errors.Is(err, ErrAlreadyPrinted) { // If cmdio logger initialization succeeds, then this function logs with the // initialized cmdio logger, otherwise with the default cmdio logger cmdio.LogError(cmd.Context(), err) diff --git a/cmd/root/silent_err.go b/cmd/root/silent_err.go new file mode 100644 index 000000000..b361cc6b4 --- /dev/null +++ b/cmd/root/silent_err.go @@ -0,0 +1,7 @@ +package root + +import "errors" + +// ErrAlreadyPrinted is not printed to the user. It's used to signal that the command should exit with an error, +// but the error message was already printed. 
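Aside: ErrAlreadyPrinted is a sentinel error. A command renders its own diagnostics, then returns the sentinel (possibly wrapped), and Execute matches it with errors.Is, which unwraps, so the message is not printed twice. The switch in profiles.go from os.IsNotExist to errors.Is(err, fs.ErrNotExist) is the same idea, since os.IsNotExist does not see through wrapped errors. A self-contained sketch of the pattern:

package main

import (
    "errors"
    "fmt"
    "os"
)

var errAlreadyPrinted = errors.New("AlreadyPrinted")

func run() error {
    fmt.Fprintln(os.Stderr, "Error: something went wrong") // rendered here
    return fmt.Errorf("validate: %w", errAlreadyPrinted)   // wrapped sentinel still matches
}

func main() {
    if err := run(); err != nil {
        if !errors.Is(err, errAlreadyPrinted) {
            fmt.Fprintln(os.Stderr, err) // print only errors not yet shown
        }
        os.Exit(1) // exit non-zero either way
    }
}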
+var ErrAlreadyPrinted = errors.New("AlreadyPrinted") diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index e5f1bfc9e..bab451593 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -135,7 +135,7 @@ func New() *cobra.Command { if f.watch { err = s.RunContinuous(ctx) } else { - err = s.RunOnce(ctx) + _, err = s.RunOnce(ctx) } s.Close() diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index b741e7b16..564aeae56 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,7 +17,8 @@ import ( func TestSyncOptionsFromBundle(t *testing.T) { tempDir := t.TempDir() b := &bundle.Bundle{ - RootPath: tempDir, + RootPath: tempDir, + BundleRoot: vfs.MustNew(tempDir), Config: config.Root{ Bundle: config.Bundle{ Target: "default", diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index d4a7d02af..61c1e0eab 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -24,7 +24,12 @@ func New() *cobra.Command { Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the sql_task type of - the Jobs API, e.g. :method:jobs/create.`, + the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -73,7 +78,12 @@ func newCreate() *cobra.Command { Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification - destinations if the condition was met.` + destinations if the condition was met. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -131,8 +141,13 @@ func newDelete() *cobra.Command { cmd.Long = `Delete an alert. Deletes an alert. Deleted alerts are no longer accessible and cannot be - restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to - the trash.` + restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + the trash. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -199,7 +214,12 @@ func newGet() *cobra.Command { cmd.Short = `Get an alert.` cmd.Long = `Get an alert. - Gets an alert.` + Gets an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. 
+ [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -261,7 +281,12 @@ func newList() *cobra.Command { cmd.Short = `Get alerts.` cmd.Long = `Get alerts. - Gets a list of alerts.` + Gets a list of alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -312,7 +337,12 @@ func newUpdate() *cobra.Command { cmd.Short = `Update an alert.` cmd.Long = `Update an alert. - Updates an alert.` + Updates an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index 1d6de4775..1572d4f4b 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -35,13 +35,14 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) - cmd.AddCommand(newCreateDeployment()) cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeploy()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetDeployment()) cmd.AddCommand(newGetEnvironment()) cmd.AddCommand(newList()) cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newStart()) cmd.AddCommand(newStop()) cmd.AddCommand(newUpdate()) @@ -79,15 +80,14 @@ func newCreate() *cobra.Command { cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) cmd.Use = "create NAME" - cmd.Short = `Create an App.` - cmd.Long = `Create an App. + cmd.Short = `Create an app.` + cmd.Long = `Create an app. Creates a new app. Arguments: NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens and be between 2 and 30 characters long. It must be - unique within the workspace.` + characters and hyphens. It must be unique within the workspace.` cmd.Annotations = make(map[string]string) @@ -156,107 +156,6 @@ func newCreate() *cobra.Command { return cmd } -// start create-deployment command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var createDeploymentOverrides []func( - *cobra.Command, - *serving.CreateAppDeploymentRequest, -) - -func newCreateDeployment() *cobra.Command { - cmd := &cobra.Command{} - - var createDeploymentReq serving.CreateAppDeploymentRequest - var createDeploymentJson flags.JsonFlag - - var createDeploymentSkipWait bool - var createDeploymentTimeout time.Duration - - cmd.Flags().BoolVar(&createDeploymentSkipWait, "no-wait", createDeploymentSkipWait, `do not wait to reach SUCCEEDED state`) - cmd.Flags().DurationVar(&createDeploymentTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) - // TODO: short flags - cmd.Flags().Var(&createDeploymentJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Use = "create-deployment APP_NAME SOURCE_CODE_PATH" - cmd.Short = `Create an App Deployment.` - cmd.Long = `Create an App Deployment. - - Creates an app deployment for the app with the supplied name. 
- - Arguments: - APP_NAME: The name of the app. - SOURCE_CODE_PATH: The source code path of the deployment.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - if cmd.Flags().Changed("json") { - err := root.ExactArgs(1)(cmd, args) - if err != nil { - return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path' in your JSON input") - } - return nil - } - check := root.ExactArgs(2) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = createDeploymentJson.Unmarshal(&createDeploymentReq) - if err != nil { - return err - } - } - createDeploymentReq.AppName = args[0] - if !cmd.Flags().Changed("json") { - createDeploymentReq.SourceCodePath = args[1] - } - - wait, err := w.Apps.CreateDeployment(ctx, createDeploymentReq) - if err != nil { - return err - } - if createDeploymentSkipWait { - return cmdio.Render(ctx, wait.Response) - } - spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *serving.AppDeployment) { - if i.Status == nil { - return - } - status := i.Status.State - statusMessage := fmt.Sprintf("current status: %s", status) - if i.Status != nil { - statusMessage = i.Status.Message - } - spinner <- statusMessage - }).GetWithTimeout(createDeploymentTimeout) - close(spinner) - if err != nil { - return err - } - return cmdio.Render(ctx, info) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range createDeploymentOverrides { - fn(cmd, &createDeploymentReq) - } - - return cmd -} - // start delete command // Slice with functions to override default command behavior. @@ -274,8 +173,8 @@ func newDelete() *cobra.Command { // TODO: short flags cmd.Use = "delete NAME" - cmd.Short = `Delete an App.` - cmd.Long = `Delete an App. + cmd.Short = `Delete an app.` + cmd.Long = `Delete an app. Deletes an app. @@ -315,6 +214,120 @@ func newDelete() *cobra.Command { return cmd } +// start deploy command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deployOverrides []func( + *cobra.Command, + *serving.CreateAppDeploymentRequest, +) + +func newDeploy() *cobra.Command { + cmd := &cobra.Command{} + + var deployReq serving.CreateAppDeploymentRequest + var deployJson flags.JsonFlag + + var deploySkipWait bool + var deployTimeout time.Duration + + cmd.Flags().BoolVar(&deploySkipWait, "no-wait", deploySkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&deployTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) + // TODO: short flags + cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH MODE" + cmd.Short = `Create an app deployment.` + cmd.Long = `Create an app deployment. + + Creates an app deployment for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + SOURCE_CODE_PATH: The workspace file system path of the source code used to create the app + deployment. 
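Aside: in the deploy command's RunE just below, the MODE positional is parsed with fmt.Sscan into the SDK's enum type. That works because fmt's scanner falls back to reflection and fills any string-kind destination with the next whitespace-delimited token; illustrated with a stand-in type:

package main

import "fmt"

// appDeploymentMode stands in for the SDK's generated enum (a named string type).
type appDeploymentMode string

func main() {
    var m appDeploymentMode
    // Sscan reads one whitespace-delimited token into a string-kind value.
    if _, err := fmt.Sscan("SNAPSHOT", &m); err != nil {
        panic(err)
    }
    fmt.Println(m) // SNAPSHOT
}

Note that Sscan accepts any token for a string-kind value, so the "invalid MODE" branch below guards against scan failures; checking that the token is one of the allowed modes is left to the API.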
This is different from + deployment_artifacts.source_code_path, which is the path used by the + deployed app. The former refers to the original source code location of + the app in the workspace during deployment creation, whereas the latter + provides a system generated stable snapshotted source code path used by + the deployment. + MODE: The mode of which the deployment will manage the source code.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path', 'mode' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = deployJson.Unmarshal(&deployReq) + if err != nil { + return err + } + } + deployReq.AppName = args[0] + if !cmd.Flags().Changed("json") { + deployReq.SourceCodePath = args[1] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &deployReq.Mode) + if err != nil { + return fmt.Errorf("invalid MODE: %s", args[2]) + } + } + + wait, err := w.Apps.Deploy(ctx, deployReq) + if err != nil { + return err + } + if deploySkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.AppDeployment) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(deployTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deployOverrides { + fn(cmd, &deployReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. @@ -332,8 +345,8 @@ func newGet() *cobra.Command { // TODO: short flags cmd.Use = "get NAME" - cmd.Short = `Get an App.` - cmd.Long = `Get an App. + cmd.Short = `Get an app.` + cmd.Long = `Get an app. Retrieves information for the app with the supplied name. @@ -390,8 +403,8 @@ func newGetDeployment() *cobra.Command { // TODO: short flags cmd.Use = "get-deployment APP_NAME DEPLOYMENT_ID" - cmd.Short = `Get an App Deployment.` - cmd.Long = `Get an App Deployment. + cmd.Short = `Get an app deployment.` + cmd.Long = `Get an app deployment. Retrieves information for the app deployment with the supplied name and deployment id. @@ -451,8 +464,8 @@ func newGetEnvironment() *cobra.Command { // TODO: short flags cmd.Use = "get-environment NAME" - cmd.Short = `Get App Environment.` - cmd.Long = `Get App Environment. + cmd.Short = `Get app environment.` + cmd.Long = `Get app environment. Retrieves app environment. @@ -512,8 +525,8 @@ func newList() *cobra.Command { cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to the next page of apps.`) cmd.Use = "list" - cmd.Short = `List Apps.` - cmd.Long = `List Apps. 
+ cmd.Short = `List apps.` + cmd.Long = `List apps. Lists all apps in the workspace.` @@ -565,8 +578,8 @@ func newListDeployments() *cobra.Command { cmd.Flags().StringVar(&listDeploymentsReq.PageToken, "page-token", listDeploymentsReq.PageToken, `Pagination token to go to the next page of apps.`) cmd.Use = "list-deployments APP_NAME" - cmd.Short = `List App Deployments.` - cmd.Long = `List App Deployments. + cmd.Short = `List app deployments.` + cmd.Long = `List app deployments. Lists all app deployments for the app with the supplied name. @@ -603,6 +616,64 @@ func newListDeployments() *cobra.Command { return cmd } +// start start command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var startOverrides []func( + *cobra.Command, + *serving.StartAppRequest, +) + +func newStart() *cobra.Command { + cmd := &cobra.Command{} + + var startReq serving.StartAppRequest + + // TODO: short flags + + cmd.Use = "start NAME" + cmd.Short = `Start an app.` + cmd.Long = `Start an app. + + Start the last active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + startReq.Name = args[0] + + response, err := w.Apps.Start(ctx, startReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range startOverrides { + fn(cmd, &startReq) + } + + return cmd +} + // start stop command // Slice with functions to override default command behavior. @@ -620,8 +691,8 @@ func newStop() *cobra.Command { // TODO: short flags cmd.Use = "stop NAME" - cmd.Short = `Stop an App.` - cmd.Long = `Stop an App. + cmd.Short = `Stop an app.` + cmd.Long = `Stop an app. Stops the active deployment of the app in the workspace. @@ -682,15 +753,14 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`) cmd.Use = "update NAME" - cmd.Short = `Update an App.` - cmd.Long = `Update an App. + cmd.Short = `Update an app.` + cmd.Long = `Update an app. Updates the app with the supplied name. Arguments: NAME: The name of the app. The name must contain only lowercase alphanumeric - characters and hyphens and be between 2 and 30 characters long. It must be - unique within the workspace.` + characters and hyphens. 
It must be unique within the workspace.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 8085b69e2..a17bb0072 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -273,6 +273,8 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of catalogs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) cmd.Use = "list" cmd.Short = `List catalogs.` diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index f4baab3b2..abde1bb71 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -200,6 +200,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, @@ -445,6 +446,7 @@ func newEdit() *cobra.Command { cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go index 8669dfae5..18f3fb39e 100755 --- a/cmd/workspace/consumer-listings/consumer-listings.go +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -31,6 +31,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newBatchGet()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) cmd.AddCommand(newSearch()) @@ -43,6 +44,62 @@ func New() *cobra.Command { return cmd } +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetListingsRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetListingsRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of listings.` + cmd.Long = `Get one batch of listings. One may specify up to 50 IDs per request. + + Batch get a published listing in the Databricks Marketplace that the consumer + has access to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerListings.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
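Aside: new enum values such as LEGACY_SINGLE_USER_STANDARD above surface in the CLI just by extending the flag help text, because enum fields are registered with cmd.Flags().Var, which needs only the pflag.Value trio of String/Set/Type that the SDK's generated enums provide. A toy version of that contract:

package main

import (
    "fmt"

    "github.com/spf13/cobra"
)

// mode is a toy enum satisfying the pflag.Value interface used by Var.
type mode string

func (m *mode) String() string { return string(*m) }
func (m *mode) Type() string   { return "DataSecurityMode" }
func (m *mode) Set(v string) error {
    switch v {
    case "NONE", "SINGLE_USER", "USER_ISOLATION", "LEGACY_SINGLE_USER_STANDARD":
        *m = mode(v)
        return nil
    }
    return fmt.Errorf("unsupported value %q", v)
}

func main() {
    var m mode
    cmd := &cobra.Command{Use: "create"}
    cmd.Flags().Var(&m, "data-security-mode", "data governance model for the cluster")
    if err := cmd.Flags().Set("data-security-mode", "SINGLE_USER"); err != nil {
        panic(err)
    }
    fmt.Println(m) // SINGLE_USER
}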
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go index d8ac0ec12..579a89516 100755 --- a/cmd/workspace/consumer-providers/consumer-providers.go +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -30,6 +30,7 @@ func New() *cobra.Command { } // Add methods + cmd.AddCommand(newBatchGet()) cmd.AddCommand(newGet()) cmd.AddCommand(newList()) @@ -41,6 +42,62 @@ func New() *cobra.Command { return cmd } +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetProvidersRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetProvidersRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of providers.` + cmd.Long = `Get one batch of providers. One may specify up to 50 IDs per request. + + Batch get a provider in the Databricks Marketplace with at least one visible + listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerProviders.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index 1a143538b..fcab0aa2a 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -268,8 +268,8 @@ func newList() *cobra.Command { Fetch a paginated list of dashboard objects. - ### **Warning: Calling this API concurrently 10 or more times could result in - throttling, service degradation, or a temporary ban.**` + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/data-sources/data-sources.go b/cmd/workspace/data-sources/data-sources.go index 0f0f8541e..f310fe50a 100755 --- a/cmd/workspace/data-sources/data-sources.go +++ b/cmd/workspace/data-sources/data-sources.go @@ -25,7 +25,12 @@ func New() *cobra.Command { This API does not support searches. It returns the full list of SQL warehouses in your workspace. 
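Aside: the new batch-get commands above wrap SDK calls that accept up to 50 IDs per request; the ids array is still a TODO in the generated flags, so for now the request is most easily driven from code. A sketch against the Go SDK, where the Ids, Listings, and Id field names are my assumption from the command's TODOs rather than confirmed signatures:

package main

import (
    "context"
    "fmt"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/marketplace"
)

func main() {
    ctx := context.Background()
    w := databricks.Must(databricks.NewWorkspaceClient())

    // Up to 50 IDs per request, per the command help above.
    // Field names here are assumptions, not verified against the SDK.
    res, err := w.ConsumerListings.BatchGet(ctx, marketplace.BatchGetListingsRequest{
        Ids: []string{"listing-id-1", "listing-id-2"},
    })
    if err != nil {
        panic(err)
    }
    for _, l := range res.Listings {
        fmt.Println(l.Id)
    }
}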
We advise you to use any text editor, REST client, or grep to search the response from this API for the name of your SQL warehouse - as it appears in Databricks SQL.`, + as it appears in Databricks SQL. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -60,7 +65,12 @@ func newList() *cobra.Command { Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, - you need only a SQL warehouse's id to create new queries against it.` + you need only a SQL warehouse's id to create new queries against it. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index bd63d3fa4..8f0dd346a 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -348,6 +348,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 1aa6daf38..c8de48797 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -69,6 +69,8 @@ func newCreate() *cobra.Command { cmd.Short = `Create a function.` cmd.Long = `Create a function. 
+ **WARNING: This API is experimental and will change in future versions** + Creates a new function The user must have the following permissions in order for the function to be diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index e31c3f086..50a045921 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -1502,24 +1502,15 @@ func newSubmit() *cobra.Command { cmd.Flags().Var(&submitJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: access_control_list - // TODO: complex arg: condition_task - // TODO: complex arg: dbt_task // TODO: complex arg: email_notifications + // TODO: array: environments // TODO: complex arg: git_source // TODO: complex arg: health cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`) - // TODO: complex arg: notebook_task // TODO: complex arg: notification_settings - // TODO: complex arg: pipeline_task - // TODO: complex arg: python_wheel_task // TODO: complex arg: queue // TODO: complex arg: run_as - // TODO: complex arg: run_job_task cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) - // TODO: complex arg: spark_jar_task - // TODO: complex arg: spark_python_task - // TODO: complex arg: spark_submit_task - // TODO: complex arg: sql_task // TODO: array: tasks cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`) // TODO: complex arg: webhook_notifications diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 566853ff9..36eab0e7f 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -31,13 +31,23 @@ func New() *cobra.Command { // Add methods cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateSchedule()) + cmd.AddCommand(newCreateSubscription()) + cmd.AddCommand(newDeleteSchedule()) + cmd.AddCommand(newDeleteSubscription()) cmd.AddCommand(newGet()) cmd.AddCommand(newGetPublished()) + cmd.AddCommand(newGetSchedule()) + cmd.AddCommand(newGetSubscription()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListSchedules()) + cmd.AddCommand(newListSubscriptions()) cmd.AddCommand(newMigrate()) cmd.AddCommand(newPublish()) cmd.AddCommand(newTrash()) cmd.AddCommand(newUnpublish()) cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateSchedule()) // Apply optional overrides to this command. for _, fn := range cmdOverrides { @@ -126,6 +136,277 @@ func newCreate() *cobra.Command { return cmd } +// start create-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
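Aside: that recurring comment marks the seam between codegen and hand-written behavior. Each generated command declares an overrides slice and applies it after building the command; manually curated override.go files in the same package append to it from init(). Boiled down to a standalone sketch:

package main

import "github.com/spf13/cobra"

// Generated file: declares the hook and applies it last.
var listOverrides []func(*cobra.Command)

func newList() *cobra.Command {
    cmd := &cobra.Command{Use: "list"}
    for _, fn := range listOverrides {
        fn(cmd) // manual customizations win, since they run last
    }
    return cmd
}

// Curated file (override.go): registers tweaks without touching generated code.
func init() {
    listOverrides = append(listOverrides, func(cmd *cobra.Command) {
        cmd.Annotations = map[string]string{"template": "{{.Name}}"}
    })
}

func main() {
    _ = newList()
}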
+var createScheduleOverrides []func( + *cobra.Command, + *dashboards.CreateScheduleRequest, +) + +func newCreateSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var createScheduleReq dashboards.CreateScheduleRequest + var createScheduleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + + cmd.Use = "create-schedule DASHBOARD_ID" + cmd.Short = `Create dashboard schedule.` + cmd.Long = `Create dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createScheduleJson.Unmarshal(&createScheduleReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createScheduleReq.DashboardId = args[0] + + response, err := w.Lakeview.CreateSchedule(ctx, createScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createScheduleOverrides { + fn(cmd, &createScheduleReq) + } + + return cmd +} + +// start create-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createSubscriptionOverrides []func( + *cobra.Command, + *dashboards.CreateSubscriptionRequest, +) + +func newCreateSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var createSubscriptionReq dashboards.CreateSubscriptionRequest + var createSubscriptionJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createSubscriptionJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-subscription DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Create schedule subscription.` + cmd.Long = `Create schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. 
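Aside: create-schedule and create-subscription accept their request body only through --json, which per the flag help takes either an inline JSON string or @path/to/file.json. A rough stand-in for that flag's contract, ignoring the real implementation's validation:

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "strings"
)

// jsonFlag mimics the inline-or-@file behavior of the CLI's flags.JsonFlag.
type jsonFlag struct{ raw []byte }

func (f *jsonFlag) Set(v string) error {
    if strings.HasPrefix(v, "@") {
        b, err := os.ReadFile(v[1:]) // @path/to/file.json
        if err != nil {
            return err
        }
        f.raw = b
        return nil
    }
    f.raw = []byte(v) // inline JSON string
    return nil
}

func (f *jsonFlag) Unmarshal(v any) error { return json.Unmarshal(f.raw, v) }

func main() {
    var f jsonFlag
    if err := f.Set(`{"pause_status": "PAUSED"}`); err != nil {
        panic(err)
    }
    var req map[string]any
    if err := f.Unmarshal(&req); err != nil {
        panic(err)
    }
    fmt.Println(req)
}

The real flags.JsonFlag also implements String and Type, since the generated code registers it with cmd.Flags().Var.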
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createSubscriptionJson.Unmarshal(&createSubscriptionReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createSubscriptionReq.DashboardId = args[0] + createSubscriptionReq.ScheduleId = args[1] + + response, err := w.Lakeview.CreateSubscription(ctx, createSubscriptionReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createSubscriptionOverrides { + fn(cmd, &createSubscriptionReq) + } + + return cmd +} + +// start delete-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteScheduleOverrides []func( + *cobra.Command, + *dashboards.DeleteScheduleRequest, +) + +func newDeleteSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var deleteScheduleReq dashboards.DeleteScheduleRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`) + + cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Delete dashboard schedule.` + cmd.Long = `Delete dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteScheduleReq.DashboardId = args[0] + deleteScheduleReq.ScheduleId = args[1] + + err = w.Lakeview.DeleteSchedule(ctx, deleteScheduleReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteScheduleOverrides { + fn(cmd, &deleteScheduleReq) + } + + return cmd +} + +// start delete-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteSubscriptionOverrides []func( + *cobra.Command, + *dashboards.DeleteSubscriptionRequest, +) + +func newDeleteSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSubscriptionReq dashboards.DeleteSubscriptionRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`) + + cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" + cmd.Short = `Delete schedule subscription.` + cmd.Long = `Delete schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. + SUBSCRIPTION_ID: UUID identifying the subscription.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteSubscriptionReq.DashboardId = args[0] + deleteSubscriptionReq.ScheduleId = args[1] + deleteSubscriptionReq.SubscriptionId = args[2] + + err = w.Lakeview.DeleteSubscription(ctx, deleteSubscriptionReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteSubscriptionOverrides { + fn(cmd, &deleteSubscriptionReq) + } + + return cmd +} + // start get command // Slice with functions to override default command behavior. @@ -242,6 +523,303 @@ func newGetPublished() *cobra.Command { return cmd } +// start get-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getScheduleOverrides []func( + *cobra.Command, + *dashboards.GetScheduleRequest, +) + +func newGetSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var getScheduleReq dashboards.GetScheduleRequest + + // TODO: short flags + + cmd.Use = "get-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Get dashboard schedule.` + cmd.Long = `Get dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getScheduleReq.DashboardId = args[0] + getScheduleReq.ScheduleId = args[1] + + response, err := w.Lakeview.GetSchedule(ctx, getScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
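Aside: the delete-schedule and delete-subscription commands above thread an optional --etag through to the request, the usual optimistic-concurrency handshake: read the object, then delete with the etag you saw, and the service rejects the delete if the object changed in between. Sketched against the SDK calls these commands wrap (the Etag field on the fetched schedule is my assumption):

package main

import (
    "context"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
    ctx := context.Background()
    w := databricks.Must(databricks.NewWorkspaceClient())

    s, err := w.Lakeview.GetSchedule(ctx, dashboards.GetScheduleRequest{
        DashboardId: "<dashboard-uuid>",
        ScheduleId:  "<schedule-uuid>",
    })
    if err != nil {
        panic(err)
    }
    // s.Etag is assumed; the delete fails if the schedule changed since the read.
    err = w.Lakeview.DeleteSchedule(ctx, dashboards.DeleteScheduleRequest{
        DashboardId: "<dashboard-uuid>",
        ScheduleId:  "<schedule-uuid>",
        Etag:        s.Etag,
    })
    if err != nil {
        panic(err)
    }
}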
+	for _, fn := range getScheduleOverrides {
+		fn(cmd, &getScheduleReq)
+	}
+
+	return cmd
+}
+
+// start get-subscription command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var getSubscriptionOverrides []func(
+	*cobra.Command,
+	*dashboards.GetSubscriptionRequest,
+)
+
+func newGetSubscription() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var getSubscriptionReq dashboards.GetSubscriptionRequest
+
+	// TODO: short flags
+
+	cmd.Use = "get-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID"
+	cmd.Short = `Get schedule subscription.`
+	cmd.Long = `Get schedule subscription.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs.
+    SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.
+    SUBSCRIPTION_ID: UUID identifying the subscription.`
+
+	// This command is being previewed; hide from help output.
+	cmd.Hidden = true
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(3)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		getSubscriptionReq.DashboardId = args[0]
+		getSubscriptionReq.ScheduleId = args[1]
+		getSubscriptionReq.SubscriptionId = args[2]
+
+		response, err := w.Lakeview.GetSubscription(ctx, getSubscriptionReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range getSubscriptionOverrides {
+		fn(cmd, &getSubscriptionReq)
+	}
+
+	return cmd
+}
+
+// start list command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var listOverrides []func(
+	*cobra.Command,
+	*dashboards.ListDashboardsRequest,
+)
+
+func newList() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var listReq dashboards.ListDashboardsRequest
+
+	// TODO: short flags
+
+	cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`)
+	cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`)
+	cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`)
+	cmd.Flags().Var(&listReq.View, "view", `Indicates whether to include all metadata from the dashboard in the response. Supported values: [DASHBOARD_VIEW_BASIC, DASHBOARD_VIEW_FULL]`)
+
+	cmd.Use = "list"
+	cmd.Short = `List dashboards.`
+	cmd.Long = `List dashboards.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(0)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		response := w.Lakeview.List(ctx, listReq)
+		return cmdio.RenderIterator(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-schedules command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSchedulesOverrides []func( + *cobra.Command, + *dashboards.ListSchedulesRequest, +) + +func newListSchedules() *cobra.Command { + cmd := &cobra.Command{} + + var listSchedulesReq dashboards.ListSchedulesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`) + cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`) + + cmd.Use = "list-schedules DASHBOARD_ID" + cmd.Short = `List dashboard schedules.` + cmd.Long = `List dashboard schedules. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listSchedulesReq.DashboardId = args[0] + + response := w.Lakeview.ListSchedules(ctx, listSchedulesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSchedulesOverrides { + fn(cmd, &listSchedulesReq) + } + + return cmd +} + +// start list-subscriptions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSubscriptionsOverrides []func( + *cobra.Command, + *dashboards.ListSubscriptionsRequest, +) + +func newListSubscriptions() *cobra.Command { + cmd := &cobra.Command{} + + var listSubscriptionsReq dashboards.ListSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`) + cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`) + + cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `List schedule subscriptions.` + cmd.Long = `List schedule subscriptions. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. 
+	cmd.Hidden = true
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(2)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		listSubscriptionsReq.DashboardId = args[0]
+		listSubscriptionsReq.ScheduleId = args[1]
+
+		response := w.Lakeview.ListSubscriptions(ctx, listSubscriptionsReq)
+		return cmdio.RenderIterator(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range listSubscriptionsOverrides {
+		fn(cmd, &listSubscriptionsReq)
+	}
+
+	return cmd
+}
+
 // start migrate command
 
 // Slice with functions to override default command behavior.
@@ -576,4 +1154,79 @@ func newUpdate() *cobra.Command {
 	return cmd
 }
 
+// start update-schedule command
+
+// Slice with functions to override default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var updateScheduleOverrides []func(
+	*cobra.Command,
+	*dashboards.UpdateScheduleRequest,
+)
+
+func newUpdateSchedule() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var updateScheduleReq dashboards.UpdateScheduleRequest
+	var updateScheduleJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for the schedule.`)
+	cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`)
+	cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`)
+
+	cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID"
+	cmd.Short = `Update dashboard schedule.`
+	cmd.Long = `Update dashboard schedule.
+
+  Arguments:
+    DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.
+    SCHEDULE_ID: UUID identifying the schedule.`
+
+	// This command is being previewed; hide from help output.
+	cmd.Hidden = true
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(2)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = updateScheduleJson.Unmarshal(&updateScheduleReq)
+			if err != nil {
+				return err
+			}
+		} else {
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		}
+		updateScheduleReq.DashboardId = args[0]
+		updateScheduleReq.ScheduleId = args[1]
+
+		response, err := w.Lakeview.UpdateSchedule(ctx, updateScheduleReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+ for _, fn := range updateScheduleOverrides { + fn(cmd, &updateScheduleReq) + } + + return cmd +} + // end service Lakeview diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index b96eb7154..650131974 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -23,7 +23,12 @@ func New() *cobra.Command { Long: `These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the - sql_task type of the Jobs API, e.g. :method:jobs/create.`, + sql_task type of the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", @@ -76,7 +81,12 @@ func newCreate() *cobra.Command { available SQL warehouses. Or you can copy the data_source_id from an existing query. - **Note**: You cannot add a visualization until you create the query.` + **Note**: You cannot add a visualization until you create the query. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -135,7 +145,12 @@ func newDelete() *cobra.Command { Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is - deleted after 30 days.` + deleted after 30 days. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -203,7 +218,12 @@ func newGet() *cobra.Command { cmd.Long = `Get a query definition. Retrieve a query object definition along with contextual permissions - information about the currently authenticated user.` + information about the currently authenticated user. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -278,8 +298,13 @@ func newList() *cobra.Command { Gets a list of queries. Optionally, this list can be filtered by a search term. - ### **Warning: Calling this API concurrently 10 or more times could result in - throttling, service degradation, or a temporary ban.**` + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -330,7 +355,12 @@ func newRestore() *cobra.Command { cmd.Long = `Restore a query. Restore a query that has been moved to the trash. A restored query appears in - list views and searches. 
You can use restored queries for alerts.` + list views and searches. You can use restored queries for alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -409,7 +439,12 @@ func newUpdate() *cobra.Command { Modify this query definition. - **Note**: You cannot undo this operation.` + **Note**: You cannot undo this operation. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 325945031..18656a61c 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -366,6 +366,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index dff8176ea..158474770 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -42,6 +42,7 @@ func New() *cobra.Command { cmd.AddCommand(newGetIndex()) cmd.AddCommand(newListIndexes()) cmd.AddCommand(newQueryIndex()) + cmd.AddCommand(newQueryNextPage()) cmd.AddCommand(newScanIndex()) cmd.AddCommand(newSyncIndex()) cmd.AddCommand(newUpsertDataVectorIndex()) @@ -416,6 +417,7 @@ func newQueryIndex() *cobra.Command { cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`) cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`) cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`) + cmd.Flags().StringVar(&queryIndexReq.QueryType, "query-type", queryIndexReq.QueryType, `The query type to use.`) // TODO: array: query_vector cmd.Flags().Float64Var(&queryIndexReq.ScoreThreshold, "score-threshold", queryIndexReq.ScoreThreshold, `Threshold for the approximate nearest neighbor search.`) @@ -469,6 +471,76 @@ func newQueryIndex() *cobra.Command { return cmd } +// start query-next-page command + +// Slice with functions to override 
default command behavior.
+// Functions can be added from the `init()` function in manually curated files in this directory.
+var queryNextPageOverrides []func(
+	*cobra.Command,
+	*vectorsearch.QueryVectorIndexNextPageRequest,
+)
+
+func newQueryNextPage() *cobra.Command {
+	cmd := &cobra.Command{}
+
+	var queryNextPageReq vectorsearch.QueryVectorIndexNextPageRequest
+	var queryNextPageJson flags.JsonFlag
+
+	// TODO: short flags
+	cmd.Flags().Var(&queryNextPageJson, "json", `either inline JSON string or @path/to/file.json with request body`)
+
+	cmd.Flags().StringVar(&queryNextPageReq.EndpointName, "endpoint-name", queryNextPageReq.EndpointName, `Name of the endpoint.`)
+	cmd.Flags().StringVar(&queryNextPageReq.PageToken, "page-token", queryNextPageReq.PageToken, `Page token returned from a previous QueryVectorIndex or QueryVectorIndexNextPage API call.`)
+
+	cmd.Use = "query-next-page INDEX_NAME"
+	cmd.Short = `Query next page.`
+	cmd.Long = `Query next page.
+
+  Use the next_page_token returned from a previous QueryVectorIndex or
+  QueryVectorIndexNextPage request to fetch the next page of results.
+
+  Arguments:
+    INDEX_NAME: Name of the vector index to query.`
+
+	cmd.Annotations = make(map[string]string)
+
+	cmd.Args = func(cmd *cobra.Command, args []string) error {
+		check := root.ExactArgs(1)
+		return check(cmd, args)
+	}
+
+	cmd.PreRunE = root.MustWorkspaceClient
+	cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
+		ctx := cmd.Context()
+		w := root.WorkspaceClient(ctx)
+
+		if cmd.Flags().Changed("json") {
+			err = queryNextPageJson.Unmarshal(&queryNextPageReq)
+			if err != nil {
+				return err
+			}
+		}
+		queryNextPageReq.IndexName = args[0]
+
+		response, err := w.VectorSearchIndexes.QueryNextPage(ctx, queryNextPageReq)
+		if err != nil {
+			return err
+		}
+		return cmdio.Render(ctx, response)
+	}
+
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	cmd.ValidArgsFunction = cobra.NoFileCompletions
+
+	// Apply optional overrides to this command.
+	for _, fn := range queryNextPageOverrides {
+		fn(cmd, &queryNextPageReq)
+	}
+
+	return cmd
+}
+
 // start scan-index command
 
 // Slice with functions to override default command behavior.
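For context, the new `query-next-page` command is a thin wrapper over `w.VectorSearchIndexes.QueryNextPage` from databricks-sdk-go v0.43.0. Below is a minimal sketch of how the two calls chain together through the page token; the index name is a placeholder, and the `Result` and `NextPageToken` fields on the response are assumptions inferred from the command help rather than a confirmed SDK surface:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/vectorsearch"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// First page: an ordinary similarity query against the index.
	page, err := w.VectorSearchIndexes.QueryIndex(ctx, vectorsearch.QueryVectorIndexRequest{
		IndexName:  "main.default.my_index", // placeholder index name
		Columns:    []string{"id", "text"},
		QueryText:  "example query",
		NumResults: 10,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(page.Result)

	// Follow-up pages: feed next_page_token back until the service stops returning one.
	for token := page.NextPageToken; token != ""; {
		next, err := w.VectorSearchIndexes.QueryNextPage(ctx, vectorsearch.QueryVectorIndexNextPageRequest{
			IndexName: "main.default.my_index",
			PageToken: token,
		})
		if err != nil {
			panic(err)
		}
		fmt.Println(next.Result)
		token = next.NextPageToken
	}
}

On the CLI, the same chain would be a `query-index` invocation followed by `query-next-page INDEX_NAME --page-token <token>`.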
diff --git a/go.mod b/go.mod index 1b6c9aeb3..385a93b09 100644 --- a/go.mod +++ b/go.mod @@ -1,11 +1,11 @@ module github.com/databricks/cli -go 1.21 +go 1.22 require ( github.com/Masterminds/semver/v3 v3.2.1 // MIT - github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.41.0 // Apache 2.0 + github.com/briandowns/spinner v1.23.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.43.0 // Apache 2.0 github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause @@ -18,20 +18,22 @@ require ( github.com/nwidger/jsoncolor v0.3.2 // MIT github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // BSD-2-Clause github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT - github.com/spf13/cobra v1.8.0 // Apache 2.0 + github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause github.com/stretchr/testify v1.9.0 // MIT golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 - golang.org/x/mod v0.17.0 - golang.org/x/oauth2 v0.20.0 + golang.org/x/mod v0.18.0 + golang.org/x/oauth2 v0.21.0 golang.org/x/sync v0.7.0 - golang.org/x/term v0.20.0 - golang.org/x/text v0.15.0 + golang.org/x/term v0.21.0 + golang.org/x/text v0.16.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 gopkg.in/yaml.v3 v3.0.1 ) require ( + cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect @@ -42,7 +44,7 @@ require ( github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect @@ -57,13 +59,13 @@ require ( go.opentelemetry.io/otel v1.24.0 // indirect go.opentelemetry.io/otel/metric v1.24.0 // indirect go.opentelemetry.io/otel/trace v1.24.0 // indirect - golang.org/x/crypto v0.22.0 // indirect - golang.org/x/net v0.24.0 // indirect - golang.org/x/sys v0.20.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.21.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.169.0 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 // indirect - google.golang.org/grpc v1.62.0 // indirect - google.golang.org/protobuf v1.33.0 // indirect + google.golang.org/api v0.182.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 723057ad9..864b7919b 100644 --- a/go.sum +++ b/go.sum @@ -1,4 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod 
h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= @@ -12,8 +16,8 @@ github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97 github.com/ProtonMail/go-crypto v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= -github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -25,11 +29,11 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.41.0 h1:OyhYY+Q6+gqkWeXmpGEiacoU2RStTeWPF0x4vmqbQdc= -github.com/databricks/databricks-sdk-go v0.41.0/go.mod h1:rLIhh7DvifVLmf2QxMr/vMRGqdrTZazn8VYo4LilfCo= +github.com/databricks/databricks-sdk-go v0.43.0 h1:x4laolWhYlsQg2t8yWEGyRPZy4/Wv3pKnLEoJfVin7I= +github.com/databricks/databricks-sdk-go v0.43.0/go.mod h1:a9rr0FOHLL26kOjQjZZVFjIYmRABCbrAWVeundDEVG8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -71,9 +75,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod 
h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -81,7 +84,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -93,8 +95,8 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.2 h1:mhN09QQW1jEWeMF74zGR81R30z4VJzjZsfkUhuHF+DA= -github.com/googleapis/gax-go/v2 v2.12.2/go.mod h1:61M8vcyyXR2kqKFxKrfA22jaA8JGF7Dc8App1U3H6jc= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= @@ -137,8 +139,8 @@ github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= @@ -170,27 +172,27 @@ go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto 
v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA= -golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.18.0 h1:5+9lSbEzPSdWkH32vYPBwEpX8KwDbM52Ud9xBUvNlb0= +golang.org/x/mod v0.18.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -206,14 +208,14 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= -golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= -golang.org/x/term v0.20.0/go.mod 
h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= +golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws= +golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.21.0 h1:WVXCp+/EBEHOj53Rvu+7KiT/iElMrO8ACK16SMZ3jaA= +golang.org/x/term v0.21.0/go.mod h1:ooXLefLobQVslOqselCNF4SxFAaoS6KujMbsGzSDmX0= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= -golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -221,25 +223,25 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= -golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.169.0 h1:QwWPy71FgMWqJN/l6jVlFHUa29a7dcUy02I8o799nPY= -google.golang.org/api v0.169.0/go.mod h1:gpNOiMA2tZ4mf5R9Iwf4rK/Dcz0fbdIgWYWVoxmsyLg= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78 h1:Xs9lu+tLXxLIfuci70nG4cpwaRC+mRQPUL7LoIeDJC4= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240304161311-37d4d3c04a78/go.mod h1:UCOku4NytXMJuLQE5VuqA5lX3PcHCBo8pxNyvkf4xBs= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod 
h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.62.0 h1:HQKZ/fa1bXkX1oFOvSjmZEUL8wLSaZTjCcLAlmZRtdk= -google.golang.org/grpc v1.62.0/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -249,10 +251,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI= -google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 560a0474b..a17964b16 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -51,6 +51,13 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { return filepath, err } +func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { + t.Setenv("BUNDLE_ROOT", path) + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + stdout, _, err := c.Run() + return stdout.Bytes(), err +} + func deployBundle(t *testing.T, ctx context.Context, path string) error { t.Setenv("BUNDLE_ROOT", path) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") diff --git a/internal/bundle/validate_test.go b/internal/bundle/validate_test.go new file mode 100644 index 000000000..18da89e4c --- /dev/null +++ b/internal/bundle/validate_test.go @@ -0,0 +1,60 @@ +package bundle + +import ( + "context" + "encoding/json" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleValidate(t *testing.T) { + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") + + tmpDir 
:= t.TempDir()
+	testutil.WriteFile(t,
+		`
+bundle:
+  name: "foobar"
+
+resources:
+  jobs:
+    outer_loop:
+      name: outer loop
+      tasks:
+        - task_key: my task
+          run_job_task:
+            job_id: ${resources.jobs.inner_loop.id}
+
+    inner_loop:
+      name: inner loop
+
+`, tmpDir, "databricks.yml")
+
+	ctx := context.Background()
+	stdout, err := validateBundle(t, ctx, tmpDir)
+	require.NoError(t, err)
+
+	config := make(map[string]any)
+	err = json.Unmarshal(stdout, &config)
+	require.NoError(t, err)
+
+	getValue := func(key string) any {
+		v, err := convert.FromTyped(config, dyn.NilValue)
+		require.NoError(t, err)
+		v, err = dyn.GetByPath(v, dyn.MustPathFromString(key))
+		require.NoError(t, err)
+		return v.AsAny()
+	}
+
+	assert.Equal(t, "foobar", getValue("bundle.name"))
+	assert.Equal(t, "outer loop", getValue("resources.jobs.outer_loop.name"))
+	assert.Equal(t, "inner loop", getValue("resources.jobs.inner_loop.name"))
+	assert.Equal(t, "my task", getValue("resources.jobs.outer_loop.tasks[0].task_key"))
+	// Assert resource references are retained in the output.
+	assert.Equal(t, "${resources.jobs.inner_loop.id}", getValue("resources.jobs.outer_loop.tasks[0].run_job_task.job_id"))
+}
diff --git a/internal/filer_test.go b/internal/filer_test.go
index 3361de5bc..275304256 100644
--- a/internal/filer_test.go
+++ b/internal/filer_test.go
@@ -511,6 +511,7 @@ func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) {
 		content string
 	}{
 		{"dir1/dir2/dir3/file.txt", "file content"},
+		{"dir1/notebook.py", "# Databricks notebook source\nprint('first upload')"},
 		{"foo.py", "print('foo')"},
 		{"foo.r", "print('foo')"},
 		{"foo.scala", "println('foo')"},
@@ -523,6 +524,16 @@
 		{"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""},
 	}
 
+	// Assert that every file has a unique basename
+	basenames := map[string]struct{}{}
+	for _, f := range files {
+		basename := path.Base(f.name)
+		if _, ok := basenames[basename]; ok {
+			t.Fatalf("basename %s is not unique", basename)
+		}
+		basenames[basename] = struct{}{}
+	}
+
 	ctx := context.Background()
 	wf, _ := setupWsfsExtensionsFiler(t)
 
@@ -534,7 +545,6 @@
 	// Read entries
 	entries, err := wf.ReadDir(ctx, ".")
 	require.NoError(t, err)
-	assert.Len(t, entries, len(files))
 	names := []string{}
 	for _, e := range entries {
 		names = append(names, e.Name())
@@ -552,6 +562,18 @@
 		"scalaNb.scala",
 		"sqlNb.sql",
 	}, names)
+
+	// Read entries in subdirectory
+	entries, err = wf.ReadDir(ctx, "dir1")
+	require.NoError(t, err)
+	names = []string{}
+	for _, e := range entries {
+		names = append(names, e.Name())
+	}
+	assert.Equal(t, []string{
+		"dir2",
+		"notebook.py",
+	}, names)
 }
 
 func setupFilerWithExtensionsTest(t *testing.T) filer.Filer {
diff --git a/internal/testutil/file.go b/internal/testutil/file.go
new file mode 100644
index 000000000..ba2c3280e
--- /dev/null
+++ b/internal/testutil/file.go
@@ -0,0 +1,48 @@
+package testutil
+
+import (
+	"os"
+	"path/filepath"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TouchNotebook(t *testing.T, elems ...string) string {
+	path := filepath.Join(elems...)
+ err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + require.NoError(t, err) + return path +} + +func Touch(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} + +func WriteFile(t *testing.T, content string, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + _, err = f.WriteString(content) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} diff --git a/internal/testutil/touch.go b/internal/testutil/touch.go deleted file mode 100644 index 55683f3ed..000000000 --- a/internal/testutil/touch.go +++ /dev/null @@ -1,26 +0,0 @@ -package testutil - -import ( - "os" - "path/filepath" - "testing" - - "github.com/stretchr/testify/require" -) - -func TouchNotebook(t *testing.T, elems ...string) string { - path := filepath.Join(elems...) - os.MkdirAll(filepath.Dir(path), 0755) - err := os.WriteFile(path, []byte("# Databricks notebook source"), 0644) - require.NoError(t, err) - return path -} - -func Touch(t *testing.T, elems ...string) string { - path := filepath.Join(elems...) - os.MkdirAll(filepath.Dir(path), 0755) - f, err := os.Create(path) - require.NoError(t, err) - f.Close() - return path -} diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 2e22ee950..12a516c59 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "os" + "io/fs" "strings" "github.com/databricks/cli/libs/log" @@ -68,7 +68,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { ctx := context.Background() configFile, err := config.LoadFile(cfg.ConfigFile) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return fmt.Errorf("cannot parse config file: %w", err) diff --git a/libs/databrickscfg/ops.go b/libs/databrickscfg/ops.go index 90795afd5..6a1c182af 100644 --- a/libs/databrickscfg/ops.go +++ b/libs/databrickscfg/ops.go @@ -2,7 +2,9 @@ package databrickscfg import ( "context" + "errors" "fmt" + "io/fs" "os" "strings" @@ -29,7 +31,7 @@ func loadOrCreateConfigFile(filename string) (*config.File, error) { filename = fmt.Sprintf("%s%s", homedir, filename[1:]) } configFile, err := config.LoadFile(filename) - if err != nil && os.IsNotExist(err) { + if err != nil && errors.Is(err, fs.ErrNotExist) { file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode) if err != nil { return nil, fmt.Errorf("create %s: %w", filename, err) diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index ae491d8ab..e8d321f66 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -12,26 +12,24 @@ import ( type fromTypedOptions int const ( - // If this flag is set, zero values for scalars (strings, bools, ints, floats) - // would resolve to corresponding zero values in the dynamic representation. - // Otherwise, zero values for scalars resolve to dyn.NilValue. + // If this flag is set, zero values in the typed representation are resolved to + // the equivalent zero value in the dynamic representation. 
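+	// For example, a non-nil *string pointing at an empty string is emitted as "" rather than nil.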
+	// If it is not set, zero values resolve to [dyn.NilValue].
 	//
-	// This flag exists to reconcile the default values for scalars in a Go struct
-	// being zero values with zero values in a dynamic representation. In a Go struct,
-	// zero values are the same as the values not being set at all. This is not the case
-	// in the dynamic representation.
-	//
-	// If a scalar value in a typed Go struct is zero, in the dynamic representation
-	// we would set it to dyn.NilValue, i.e. equivalent to the value not being set at all.
-	//
-	// If a scalar value in a Go map, slice or pointer is set to zero, we will set it
-	// to the zero value in the dynamic representation, and not dyn.NilValue. This is
-	// equivalent to the value being intentionally set to zero.
-	includeZeroValuedScalars fromTypedOptions = 1 << iota
+	// This flag exists to reconcile default values in Go being zero values with values
+	// being intentionally set to their zero value. We capture zero values in the dynamic
+	// configuration if they are 1) behind a pointer, 2) a map value, 3) a slice element,
+	// in the typed configuration.
+	includeZeroValues fromTypedOptions = 1 << iota
 )
 
 // FromTyped converts changes made in the typed structure w.r.t. the configuration value
 // back to the configuration value, retaining existing location information where possible.
+//
+// It uses the reference value both for location information and to determine if the typed
+// value was changed or not. For example, if a struct-by-value field is nil in the reference
+// it will be zero-valued in the typed configuration. If it remains zero-valued, this
+// function will still emit a nil value in the dynamic representation.
 func FromTyped(src any, ref dyn.Value) (dyn.Value, error) {
 	return fromTyped(src, ref)
 }
@@ -44,42 +42,58 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value,
 	// Dereference pointer if necessary
 	for srcv.Kind() == reflect.Pointer {
 		if srcv.IsNil() {
-			return dyn.NilValue, nil
+			return dyn.NilValue.WithLocation(ref.Location()), nil
 		}
 		srcv = srcv.Elem()
 
-		// If a pointer to a scalar type points to a zero value, we should include
+		// If a pointer to a type points to a zero value, we should include
 		// that zero value in the dynamic representation.
 		// This is because by default a pointer is nil in Go, and it not being nil
 		// indicates its value was intentionally set to zero.
-		if !slices.Contains(options, includeZeroValuedScalars) {
-			options = append(options, includeZeroValuedScalars)
+		if !slices.Contains(options, includeZeroValues) {
+			options = append(options, includeZeroValues)
 		}
 	}
 
+	var v dyn.Value
+	var err error
 	switch srcv.Kind() {
 	case reflect.Struct:
-		return fromTypedStruct(srcv, ref)
+		v, err = fromTypedStruct(srcv, ref, options...)
 	case reflect.Map:
-		return fromTypedMap(srcv, ref)
+		v, err = fromTypedMap(srcv, ref)
 	case reflect.Slice:
-		return fromTypedSlice(srcv, ref)
+		v, err = fromTypedSlice(srcv, ref)
 	case reflect.String:
-		return fromTypedString(srcv, ref, options...)
+		v, err = fromTypedString(srcv, ref, options...)
 	case reflect.Bool:
-		return fromTypedBool(srcv, ref, options...)
+		v, err = fromTypedBool(srcv, ref, options...)
 	case reflect.Int, reflect.Int32, reflect.Int64:
-		return fromTypedInt(srcv, ref, options...)
+		v, err = fromTypedInt(srcv, ref, options...)
 	case reflect.Float32, reflect.Float64:
-		return fromTypedFloat(srcv, ref, options...)
+		v, err = fromTypedFloat(srcv, ref, options...)
+	case reflect.Invalid:
+		// If the value is untyped and not set (e.g. any type with nil value), we return nil.
+		v, err = dyn.NilValue, nil
+	default:
+		return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind())
 	}
-	return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind())
+	// Ensure the location metadata is retained.
+	if err != nil {
+		return dyn.InvalidValue, err
+	}
+	return v.WithLocation(ref.Location()), err
 }
 
-func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
+func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) {
 	// Check that the reference value is compatible or nil.
 	switch ref.Kind() {
+	case dyn.KindString:
+		// Ignore pure variable references (e.g. ${var.foo}).
+		if dynvar.IsPureVariableReference(ref.MustString()) {
+			return ref, nil
+		}
 	case dyn.KindMap, dyn.KindNil:
 	default:
 		return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind())
@@ -99,23 +113,43 @@ func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
 			refv = dyn.NilValue
 		}
 
+		var options []fromTypedOptions
+		if v.Kind() == reflect.Interface {
+			options = append(options, includeZeroValues)
+		}
+
 		// Convert the field taking into account the reference value (may be equal to config.NilValue).
-		nv, err := fromTyped(v.Interface(), refv)
+		nv, err := fromTyped(v.Interface(), refv, options...)
 		if err != nil {
 			return dyn.InvalidValue, err
 		}
 
-		if nv != dyn.NilValue {
+		// Include the field if either the key was set in the reference or the field is not zero-valued.
+		if ok || nv.Kind() != dyn.KindNil {
 			out.Set(refk, nv)
 		}
 	}
 
-	return dyn.NewValue(out, ref.Location()), nil
+	// Return the new mapping if:
+	// 1. The mapping has entries (i.e. the struct was not empty).
+	// 2. The reference is a map (i.e. the struct was and still is empty).
+	// 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer).
+	if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) {
+		return dyn.V(out), nil
+	}
+
+	// Otherwise, return nil.
+	return dyn.NilValue, nil
 }
 
 func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
 	// Check that the reference value is compatible or nil.
 	switch ref.Kind() {
+	case dyn.KindString:
+		// Ignore pure variable references (e.g. ${var.foo}).
+		if dynvar.IsPureVariableReference(ref.MustString()) {
+			return ref, nil
+		}
 	case dyn.KindMap, dyn.KindNil:
 	default:
 		return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind())
@@ -143,7 +177,7 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
 		}
 
 		// Convert entry taking into account the reference value (may be equal to dyn.NilValue).
-		nv, err := fromTyped(v.Interface(), refv, includeZeroValuedScalars)
+		nv, err := fromTyped(v.Interface(), refv, includeZeroValues)
 		if err != nil {
 			return dyn.InvalidValue, err
 		}
@@ -153,12 +187,17 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
 		out.Set(refk, nv)
 	}
 
-	return dyn.NewValue(out, ref.Location()), nil
+	return dyn.V(out), nil
 }
 
 func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) {
 	// Check that the reference value is compatible or nil.
 	switch ref.Kind() {
+	case dyn.KindString:
+		// Ignore pure variable references (e.g. ${var.foo}).
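+		// (Returning the reference unchanged keeps strings like "${var.foo}" intact in the output.)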
+ if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindSequence, dyn.KindNil: default: return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) @@ -172,9 +211,15 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out := make([]dyn.Value, src.Len()) for i := 0; i < src.Len(); i++ { v := src.Index(i) + refv := ref.Index(i) + + // Use nil reference if there is no reference for this index. + if refv.Kind() == dyn.KindInvalid { + refv = dyn.NilValue + } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValuedScalars) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { return dyn.InvalidValue, err } @@ -182,7 +227,7 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out[i] = nv } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -197,7 +242,7 @@ func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptio case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.String()), nil @@ -217,7 +262,7 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Bool()), nil @@ -242,7 +287,7 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Int()), nil @@ -267,7 +312,7 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption case dyn.KindNil: // This field is not set in the reference. We set it to nil if it's zero // valued in the typed representation and the includeZeroValues option is not set. - if src.IsZero() && !slices.Contains(options, includeZeroValuedScalars) { + if src.IsZero() && !slices.Contains(options, includeZeroValues) { return dyn.NilValue, nil } return dyn.V(src.Float()), nil diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index f75470f42..9141a6948 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -15,9 +15,14 @@ func TestFromTypedStructZeroFields(t *testing.T) { } src := Tmp{} - ref := dyn.NilValue - nv, err := FromTyped(src, ref) + // For an empty struct with a nil reference we expect a nil. 
+ nv, err := FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an empty struct with a non-nil reference we expect an empty map. + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) require.NoError(t, err) assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) } @@ -28,17 +33,54 @@ func TestFromTypedStructPointerZeroFields(t *testing.T) { Bar string `json:"bar"` } - // For an initialized pointer we expect an empty map. - src := &Tmp{} - nv, err := FromTyped(src, dyn.NilValue) - require.NoError(t, err) - assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + var src *Tmp + var nv dyn.Value + var err error - // For a nil pointer we expect nil. + // For a nil pointer with a nil reference we expect a nil. src = nil nv, err = FromTyped(src, dyn.NilValue) require.NoError(t, err) assert.Equal(t, dyn.NilValue, nv) + + // For a nil pointer with a non-nil reference we expect a nil. + src = nil + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an initialized pointer with a nil reference we expect an empty map. + src = &Tmp{} + nv, err = FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + + // For an initialized pointer with a non-nil reference we expect an empty map. + src = &Tmp{} + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) +} + +func TestFromTypedStructNilFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + // For a zero value struct with a reference containing nil fields we expect the nils to be retained. + src := Tmp{} + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }), nv) } func TestFromTypedStructSetFields(t *testing.T) { @@ -61,7 +103,7 @@ func TestFromTypedStructSetFields(t *testing.T) { }), nv) } -func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { type Tmp struct { Foo string `json:"foo"` Bar string `json:"bar"` @@ -80,11 +122,9 @@ func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their location. assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -312,7 +352,7 @@ func TestFromTypedMapNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { var src = map[string]string{ "foo": "bar", "bar": "qux", @@ -326,11 +366,9 @@ func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. 
assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + assert.Equal(t, dyn.NewValue("qux", dyn.Location{File: "bar"}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -387,7 +425,7 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { var src = []string{ "foo", "bar", @@ -395,17 +433,15 @@ func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { ref := dyn.V([]dyn.Value{ dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("baz", dyn.Location{File: "baz"}), + dyn.NewValue("bar", dyn.Location{File: "bar"}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. + // Assert foo and bar have retained their locations. assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "bar"}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -440,12 +476,20 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V("new"), nv) } -func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { - var src string = "foo" +func TestFromTypedStringRetainsLocations(t *testing.T) { var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) + + // case: value has not been changed + var src string = "foo" nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = "bar" + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -487,12 +531,20 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(true), nv) } -func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { - var src bool = true +func TestFromTypedBoolRetainsLocations(t *testing.T) { var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src bool = true nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = false + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "foo"}), nv) } func TestFromTypedBoolVariableReference(t *testing.T) { @@ -542,12 +594,20 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(int64(1234)), nv) } -func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { - var src int = 1234 +func TestFromTypedIntRetainsLocations(t *testing.T) { var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) + + // case: value has not been changed + var src int = 1234 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1235 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(int64(1235), dyn.Location{File: "foo"}), nv) } func 
TestFromTypedIntVariableReference(t *testing.T) { @@ -597,12 +657,21 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(1.23), nv) } -func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { - var src float64 = 1.23 +func TestFromTypedFloatRetainsLocations(t *testing.T) { + var src float64 var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) + + // case: value has not been changed + src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) + + // case: value has been changed + src = 1.24 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(1.24, dyn.Location{File: "foo"}), nv) } func TestFromTypedFloatVariableReference(t *testing.T) { @@ -619,3 +688,79 @@ func TestFromTypedFloatTypeError(t *testing.T) { _, err := FromTyped(src, ref) require.Error(t, err) } + +func TestFromTypedAny(t *testing.T) { + type Tmp struct { + Foo any `json:"foo"` + Bar any `json:"bar"` + Foz any `json:"foz"` + Baz any `json:"baz"` + } + + src := Tmp{ + Foo: "foo", + Bar: false, + Foz: 0, + Baz: map[string]any{ + "foo": "foo", + "bar": 1234, + "qux": 0, + "nil": nil, + }, + } + + ref := dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(false), + "foz": dyn.V(int64(0)), + "baz": dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(int64(1234)), + "qux": dyn.V(int64(0)), + "nil": dyn.V(nil), + }), + }), nv) +} + +func TestFromTypedAnyNil(t *testing.T) { + var src any = nil + var ref = dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) +} + +func TestFromTypedNilPointerRetainsLocations(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var src *Tmp + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilMapRetainsLocation(t *testing.T) { + var src map[string]string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} + +func TestFromTypedNilSliceRetainsLocation(t *testing.T) { + var src []string + ref := dyn.NewValue(nil, dyn.Location{File: "foobar"}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, dyn.Location{File: "foobar"}), nv) +} diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 35d4d8210..ad82e20ef 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -56,6 +56,8 @@ func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen [] return n.normalizeInt(typ, src, path) case reflect.Float32, reflect.Float64: return n.normalizeFloat(typ, src, path) + case reflect.Interface: + return n.normalizeInterface(typ, src, path) } return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) @@ -166,8 +168,15 @@ func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. 
+ if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a struct. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } @@ -197,8 +206,15 @@ func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []r return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a map. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } @@ -225,8 +241,15 @@ func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen [ return dyn.NewValue(out, src.Location()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } + // Cannot interpret as a slice. return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src, path)) } @@ -371,3 +394,7 @@ func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path d return dyn.NewValue(out, src.Location()), diags } + +func (n normalizeOptions) normalizeInterface(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { + return src, nil +} diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 843b4ea59..299ffcabd 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -223,6 +223,52 @@ func TestNormalizeStructIncludeMissingFieldsOnRecursiveType(t *testing.T) { }), vout) } +func TestNormalizeStructVariableReference(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStructRandomStringError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeStructIntError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeMap(t *testing.T) { var typ map[string]string vin := dyn.V(map[string]dyn.Value{ @@ -312,6 +358,40 @@ func TestNormalizeMapNestedError(t *testing.T) { ) } +func TestNormalizeMapVariableReference(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeMapRandomStringError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, 
diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeMapIntError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeSlice(t *testing.T) { var typ []string vin := dyn.V([]dyn.Value{ @@ -400,6 +480,40 @@ func TestNormalizeSliceNestedError(t *testing.T) { ) } +func TestNormalizeSliceVariableReference(t *testing.T) { + var typ []string + vin := dyn.NewValue("${var.foo}", dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeSliceRandomStringError(t *testing.T) { + var typ []string + vin := dyn.NewValue("var foo", dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeSliceIntError(t *testing.T) { + var typ []string + vin := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + func TestNormalizeString(t *testing.T) { var typ string vin := dyn.V("string") @@ -725,3 +839,29 @@ func TestNormalizeAnchors(t *testing.T) { "foo": "bar", }, vout.AsAny()) } + +func TestNormalizeBoolToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(false, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} + +func TestNormalizeIntToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(10, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} + +func TestNormalizeSliceToAny(t *testing.T) { + var typ any + v1 := dyn.NewValue(1, dyn.Location{File: "file", Line: 1, Column: 1}) + v2 := dyn.NewValue(2, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, dyn.Location{File: "file", Line: 1, Column: 1}), vout) +} diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index f10853a2e..181c88cc9 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -16,7 +16,7 @@ func ToTyped(dst any, src dyn.Value) error { for dstv.Kind() == reflect.Pointer { // If the source value is nil and the destination is a settable pointer, // set the destination to nil. Also see `end_to_end_test.go`. 
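+		// The kind is compared instead of the value itself so that nil values
+		// carrying location metadata (see dyn.Value.WithLocation) are also
+		// recognized as nil.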
- if dstv.CanSet() && src == dyn.NilValue { + if dstv.CanSet() && src.Kind() == dyn.KindNil { dstv.SetZero() return nil } @@ -46,6 +46,8 @@ func ToTyped(dst any, src dyn.Value) error { return toTypedInt(dstv, src) case reflect.Float32, reflect.Float64: return toTypedFloat(dstv, src) + case reflect.Interface: + return toTypedInterface(dstv, src) } return fmt.Errorf("unsupported type: %s", dstv.Kind()) @@ -101,6 +103,12 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -132,6 +140,12 @@ func toTypedMap(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -157,6 +171,12 @@ func toTypedSlice(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -260,3 +280,13 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { msg: fmt.Sprintf("expected a float, found a %s", src.Kind()), } } + +func toTypedInterface(dst reflect.Value, src dyn.Value) error { + if src.Kind() == dyn.KindNil { + dst.Set(reflect.Zero(dst.Type())) + return nil + } + + dst.Set(reflect.ValueOf(src.AsAny())) + return nil +} diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 56d98a3cf..37d85539c 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -511,3 +511,32 @@ func TestToTypedWithAliasKeyType(t *testing.T) { assert.Equal(t, "bar", out["foo"]) assert.Equal(t, "baz", out["bar"]) } + +func TestToTypedAnyWithBool(t *testing.T) { + var out any + err := ToTyped(&out, dyn.V(false)) + require.NoError(t, err) + assert.Equal(t, false, out) + + err = ToTyped(&out, dyn.V(true)) + require.NoError(t, err) + assert.Equal(t, true, out) +} + +func TestToTypedAnyWithMap(t *testing.T) { + var out any + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), + }) + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, map[string]any{"foo": "bar", "bar": "baz"}, out) +} + +func TestToTypedAnyWithNil(t *testing.T) { + var out any + err := ToTyped(&out, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, nil, out) +} diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index e6340269f..bf160fa85 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -6,7 +6,7 @@ import ( "github.com/databricks/cli/libs/dyn" ) -const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}` +const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}` var re = regexp.MustCompile(VariableRegex) diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index bbecbb776..498322a42 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -247,3 +247,63 @@ func TestResolveWithInterpolateAliasedRef(t *testing.T) { assert.Equal(t, "a", getByPath(t, out, "b").MustString()) assert.Equal(t, "a", getByPath(t, out, 
"c").MustString()) } + +func TestResolveIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a"), dyn.V("b")}), + "a": dyn.V("a: ${slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveIndexedRefsFromMap(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${map.slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveMapFieldFromIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "value": dyn.V("a"), + }), + }), + }), + "a": dyn.V("a: ${map.slice[0].value}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveNestedIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${slice[0][0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 69ccf516a..ffe000da3 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -34,17 +34,17 @@ func merge(a, b dyn.Value) (dyn.Value, error) { switch ak { case dyn.KindMap: if bk != dyn.KindMap { - return dyn.NilValue, fmt.Errorf("cannot merge map with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge map with %s", bk) } return mergeMap(a, b) case dyn.KindSequence: if bk != dyn.KindSequence { - return dyn.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge sequence with %s", bk) } return mergeSequence(a, b) default: if ak != bk { - return dyn.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge %s with %s", ak, bk) } return mergePrimitive(a, b) } @@ -66,7 +66,7 @@ func mergeMap(a, b dyn.Value) (dyn.Value, error) { // If the key already exists, merge the values. 
merged, err := merge(ov, pv)
 			if err != nil {
-				return dyn.NilValue, err
+				return dyn.InvalidValue, err
 			}
 			out.Set(pk, merged)
 		} else {
diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go
index eaaaab16f..3706dbd77 100644
--- a/libs/dyn/merge/merge_test.go
+++ b/libs/dyn/merge/merge_test.go
@@ -76,7 +76,7 @@ func TestMergeMapsError(t *testing.T) {
 	{
 		out, err := Merge(v, other)
 		assert.EqualError(t, err, "cannot merge map with string")
-		assert.Equal(t, dyn.NilValue, out)
+		assert.Equal(t, dyn.InvalidValue, out)
 	}
 }
 
@@ -151,7 +151,7 @@ func TestMergeSequencesError(t *testing.T) {
 	{
 		out, err := Merge(v, other)
 		assert.EqualError(t, err, "cannot merge sequence with string")
-		assert.Equal(t, dyn.NilValue, out)
+		assert.Equal(t, dyn.InvalidValue, out)
 	}
 }
 
@@ -202,6 +202,6 @@ func TestMergePrimitivesError(t *testing.T) {
 	{
 		out, err := Merge(v, other)
 		assert.EqualError(t, err, "cannot merge string with map")
-		assert.Equal(t, dyn.NilValue, out)
+		assert.Equal(t, dyn.InvalidValue, out)
 	}
 }
diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go
index 97e8f1009..823fb1933 100644
--- a/libs/dyn/merge/override.go
+++ b/libs/dyn/merge/override.go
@@ -1,6 +1,7 @@
 package merge
 
 import (
+	"errors"
 	"fmt"
 
 	"github.com/databricks/cli/libs/dyn"
@@ -13,6 +14,9 @@ import (
 // For instance, it can disallow changes outside the specific path(s), or update
 // the location of the effective value.
 //
+// Values returned by 'VisitInsert' and 'VisitUpdate' are used as the final value
+// of the node. 'VisitDelete' can return ErrOverrideUndoDelete to undo the deletion.
+//
 // 'VisitDelete' is called when a value is removed from mapping or sequence
 // 'VisitInsert' is called when a new value is added to mapping or sequence
 // 'VisitUpdate' is called when a leaf value is updated
@@ -22,6 +26,8 @@ type OverrideVisitor struct {
 	VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error)
}
 
+var ErrOverrideUndoDelete = errors.New("undo delete operation")
+
 // Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values
 // haven't changed. Preserving 'location' is important to preserve the original source of the value
 // for error reporting.
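
The ErrOverrideUndoDelete sentinel added above gives a visitor a way to veto a deletion. A minimal standalone sketch, not part of this patch and assuming merge.Override returns (dyn.Value, error) as exercised by the tests further down, of a visitor that keeps any key missing from the right-hand side:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/merge"
)

func main() {
	left := dyn.V(map[string]dyn.Value{
		"keep": dyn.V("old"),
		"drop": dyn.V("value"),
	})
	right := dyn.V(map[string]dyn.Value{
		"keep": dyn.V("new"),
	})

	visitor := merge.OverrideVisitor{
		// Returning ErrOverrideUndoDelete keeps "drop" in the output even
		// though it is absent from the right-hand side.
		VisitDelete: func(p dyn.Path, left dyn.Value) error {
			return merge.ErrOverrideUndoDelete
		},
		VisitInsert: func(p dyn.Path, right dyn.Value) (dyn.Value, error) {
			return right, nil
		},
		VisitUpdate: func(p dyn.Path, left, right dyn.Value) (dyn.Value, error) {
			return right, nil
		},
	}

	out, err := merge.Override(left, right, visitor)
	if err != nil {
		panic(err)
	}

	// Prints: new value
	fmt.Println(out.Get("keep").MustString(), out.Get("drop").MustString())
}

Any error other than ErrOverrideUndoDelete returned from VisitDelete aborts the whole override, as the overrideMapping and overrideSequence changes below show.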
@@ -30,10 +36,6 @@ func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) } func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { - if left == dyn.NilValue && right == dyn.NilValue { - return dyn.NilValue, nil - } - if left.Kind() != right.Kind() { return visitor.VisitUpdate(basePath, left, right) } @@ -98,9 +100,11 @@ func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor Overri } else { return visitor.VisitUpdate(basePath, left, right) } + case dyn.KindNil: + return left, nil } - return dyn.InvalidValue, fmt.Errorf("unexpected kind %s", left.Kind()) + return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) } func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { @@ -113,7 +117,13 @@ func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dy err := visitor.VisitDelete(path, leftPair.Value) - if err != nil { + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + err := out.Set(leftPair.Key, leftPair.Value) + if err != nil { + return dyn.NewMapping(), err + } + } else if err != nil { return dyn.NewMapping(), err } } @@ -188,7 +198,10 @@ func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, vi path := basePath.Append(dyn.Index(i)) err := visitor.VisitDelete(path, left[i]) - if err != nil { + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + values = append(values, left[i]) + } else if err != nil { return nil, err } } diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go index a34f23424..d9ca97486 100644 --- a/libs/dyn/merge/override_test.go +++ b/libs/dyn/merge/override_test.go @@ -5,6 +5,8 @@ import ( "testing" "time" + "github.com/stretchr/testify/require" + "github.com/databricks/cli/libs/dyn" assert "github.com/databricks/cli/libs/dyn/dynassert" ) @@ -330,9 +332,9 @@ func TestOverride_Primitive(t *testing.T) { { name: "nil (not updated)", state: visitorState{}, - left: dyn.NilValue, - right: dyn.NilValue, - expected: dyn.NilValue, + left: dyn.NilValue.WithLocation(leftLocation), + right: dyn.NilValue.WithLocation(rightLocation), + expected: dyn.NilValue.WithLocation(leftLocation), }, { name: "nil (updated)", @@ -393,6 +395,24 @@ func TestOverride_Primitive(t *testing.T) { assert.Equal(t, expected, actual) } }) + + if len(tc.state.removed) > 0 { + t.Run(tc.name+" - visitor can undo delete", func(t *testing.T) { + s, visitor := createVisitor(visitorOpts{deleteError: ErrOverrideUndoDelete}) + out, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + require.NoError(t, err) + + for _, removed := range s.removed { + expected, err := dyn.GetByPath(tc.left, dyn.MustPathFromString(removed)) + require.NoError(t, err) + + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(removed)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + }) + } } } } @@ -449,6 +469,7 @@ type visitorState struct { type visitorOpts struct { error error + deleteError error returnValue *dyn.Value } @@ -470,7 +491,13 @@ func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { s.removed = append(s.removed, valuePath.String()) - return opts.error + if opts.error != nil { + return opts.error + } else if opts.deleteError != nil { + return opts.deleteError + } 
else { + return nil + } }, VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { s.added = append(s.added, valuePath.String()) diff --git a/libs/dyn/value.go b/libs/dyn/value.go index 2e8f1b9af..3d62ea1f5 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -110,12 +110,12 @@ func (v Value) AsAny() any { func (v Value) Get(key string) Value { m, ok := v.AsMap() if !ok { - return NilValue + return InvalidValue } vv, ok := m.GetByString(key) if !ok { - return NilValue + return InvalidValue } return vv @@ -124,11 +124,11 @@ func (v Value) Get(key string) Value { func (v Value) Index(i int) Value { s, ok := v.v.([]Value) if !ok { - return NilValue + return InvalidValue } if i < 0 || i >= len(s) { - return NilValue + return InvalidValue } return s[i] diff --git a/libs/dyn/value_underlying_test.go b/libs/dyn/value_underlying_test.go index 9878cfaf9..83cffb772 100644 --- a/libs/dyn/value_underlying_test.go +++ b/libs/dyn/value_underlying_test.go @@ -18,15 +18,15 @@ func TestValueUnderlyingMap(t *testing.T) { vv1, ok := v.AsMap() assert.True(t, ok) - _, ok = dyn.NilValue.AsMap() + _, ok = dyn.InvalidValue.AsMap() assert.False(t, ok) vv2 := v.MustMap() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind map, got nil", func() { - dyn.NilValue.MustMap() + assert.PanicsWithValue(t, "expected kind map, got invalid", func() { + dyn.InvalidValue.MustMap() }) } @@ -40,15 +40,15 @@ func TestValueUnderlyingSequence(t *testing.T) { vv1, ok := v.AsSequence() assert.True(t, ok) - _, ok = dyn.NilValue.AsSequence() + _, ok = dyn.InvalidValue.AsSequence() assert.False(t, ok) vv2 := v.MustSequence() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind sequence, got nil", func() { - dyn.NilValue.MustSequence() + assert.PanicsWithValue(t, "expected kind sequence, got invalid", func() { + dyn.InvalidValue.MustSequence() }) } @@ -58,15 +58,15 @@ func TestValueUnderlyingString(t *testing.T) { vv1, ok := v.AsString() assert.True(t, ok) - _, ok = dyn.NilValue.AsString() + _, ok = dyn.InvalidValue.AsString() assert.False(t, ok) vv2 := v.MustString() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind string, got nil", func() { - dyn.NilValue.MustString() + assert.PanicsWithValue(t, "expected kind string, got invalid", func() { + dyn.InvalidValue.MustString() }) } @@ -76,15 +76,15 @@ func TestValueUnderlyingBool(t *testing.T) { vv1, ok := v.AsBool() assert.True(t, ok) - _, ok = dyn.NilValue.AsBool() + _, ok = dyn.InvalidValue.AsBool() assert.False(t, ok) vv2 := v.MustBool() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind bool, got nil", func() { - dyn.NilValue.MustBool() + assert.PanicsWithValue(t, "expected kind bool, got invalid", func() { + dyn.InvalidValue.MustBool() }) } @@ -94,15 +94,15 @@ func TestValueUnderlyingInt(t *testing.T) { vv1, ok := v.AsInt() assert.True(t, ok) - _, ok = dyn.NilValue.AsInt() + _, ok = dyn.InvalidValue.AsInt() assert.False(t, ok) vv2 := v.MustInt() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind int, got nil", func() { - dyn.NilValue.MustInt() + assert.PanicsWithValue(t, "expected kind int, got invalid", func() { + dyn.InvalidValue.MustInt() }) // Test int32 type specifically. 
@@ -124,15 +124,15 @@ func TestValueUnderlyingFloat(t *testing.T) { vv1, ok := v.AsFloat() assert.True(t, ok) - _, ok = dyn.NilValue.AsFloat() + _, ok = dyn.InvalidValue.AsFloat() assert.False(t, ok) vv2 := v.MustFloat() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind float, got nil", func() { - dyn.NilValue.MustFloat() + assert.PanicsWithValue(t, "expected kind float, got invalid", func() { + dyn.InvalidValue.MustFloat() }) // Test float64 type specifically. @@ -148,14 +148,14 @@ func TestValueUnderlyingTime(t *testing.T) { vv1, ok := v.AsTime() assert.True(t, ok) - _, ok = dyn.NilValue.AsTime() + _, ok = dyn.InvalidValue.AsTime() assert.False(t, ok) vv2 := v.MustTime() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind time, got nil", func() { - dyn.NilValue.MustTime() + assert.PanicsWithValue(t, "expected kind time, got invalid", func() { + dyn.InvalidValue.MustTime() }) } diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 3fe356194..4d3cf5014 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -6,6 +6,28 @@ import ( "slices" ) +// This error is returned if the path indicates that a map or sequence is expected, but the value is nil. +type cannotTraverseNilError struct { + p Path +} + +func (e cannotTraverseNilError) Error() string { + component := e.p[len(e.p)-1] + switch { + case component.isKey(): + return fmt.Sprintf("expected a map to index %q, found nil", e.p) + case component.isIndex(): + return fmt.Sprintf("expected a sequence to index %q, found nil", e.p) + default: + panic("invalid component") + } +} + +func IsCannotTraverseNilError(err error) bool { + var target cannotTraverseNilError + return errors.As(err, &target) +} + type noSuchKeyError struct { p Path } @@ -70,11 +92,17 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts switch { case component.isKey(): // Expect a map to be set if this is a key. - m, ok := v.AsMap() - if !ok { + switch v.Kind() { + case KindMap: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) } + m := v.MustMap() + // Lookup current value in the map. ev, ok := m.GetByString(component.key) if !ok { @@ -103,11 +131,17 @@ func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts case component.isIndex(): // Expect a sequence to be set if this is an index. - s, ok := v.AsSequence() - if !ok { + switch v.Kind() { + case KindSequence: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) } + s := v.MustSequence() + // Lookup current value in the sequence. if component.index < 0 || component.index >= len(s) { return InvalidValue, indexOutOfBoundsError{path} diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index f5cfea311..56a9cf9f3 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -10,9 +10,12 @@ type MapFunc func(Path, Value) (Value, error) // Foreach returns a [MapFunc] that applies the specified [MapFunc] to each // value in a map or sequence and returns the new map or sequence. +// If the input is nil, it returns nil. 
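+// For any other kind, it returns an error.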
 func Foreach(fn MapFunc) MapFunc {
 	return func(p Path, v Value) (Value, error) {
 		switch v.Kind() {
+		case KindNil:
+			return v, nil
 		case KindMap:
 			m := v.MustMap().Clone()
 			for _, pair := range m.Pairs() {
@@ -75,8 +78,10 @@ func MapByPattern(v Value, p Pattern, fn MapFunc) (Value, error) {
 		return nv, nil
 	}
 
-	// Return original value if a key or index is missing.
-	if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) {
+	// Return the original value if:
+	// - any map or sequence along the path is nil, or
+	// - a key or index is missing
+	if IsCannotTraverseNilError(err) || IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) {
 		return v, nil
 	}
 
diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go
index df6bad496..2cea0913b 100644
--- a/libs/dyn/visit_map_test.go
+++ b/libs/dyn/visit_map_test.go
@@ -20,11 +20,14 @@ func TestMapWithEmptyPath(t *testing.T) {
 }
 
 func TestMapOnNilValue(t *testing.T) {
+	var nv dyn.Value
 	var err error
-	_, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil)
-	assert.ErrorContains(t, err, `expected a map to index "foo", found nil`)
-	_, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil)
-	assert.ErrorContains(t, err, `expected a sequence to index "[42]", found nil`)
+	nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil)
+	assert.NoError(t, err)
+	assert.Equal(t, dyn.NilValue, nv)
+	nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil)
+	assert.NoError(t, err)
+	assert.Equal(t, dyn.NilValue, nv)
 }
 
 func TestMapFuncOnMap(t *testing.T) {
@@ -269,6 +272,17 @@ func TestMapForeachOnOtherError(t *testing.T) {
 	assert.ErrorContains(t, err, "expected a map or sequence, found int")
 }
 
+func TestMapForeachOnNil(t *testing.T) {
+	vin := dyn.NilValue
+
+	// Check that if foreach is applied to nil, it returns nil.
+ vout, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, nil + })) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, vout) +} + func TestMapByPatternOnNilValue(t *testing.T) { var err error _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyKey()), nil) diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index 97b99b061..c51a11e22 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -28,7 +28,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro if err == ErrSkip { return v, nil } - return NilValue, err + return InvalidValue, err } switch v.Kind() { @@ -43,7 +43,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro continue } if err != nil { - return NilValue, err + return InvalidValue, err } out.Set(pk, nv) } @@ -57,7 +57,7 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro continue } if err != nil { - return NilValue, err + return InvalidValue, err } out = append(out, nv) } diff --git a/libs/dyn/walk_test.go b/libs/dyn/walk_test.go index d62b9a4db..f7222b0a5 100644 --- a/libs/dyn/walk_test.go +++ b/libs/dyn/walk_test.go @@ -39,7 +39,7 @@ func (w *walkCallTracker) returnSkip(path string) { } func (w *walkCallTracker) returnDrop(path string) { - w.on(path, func(v Value) Value { return NilValue }, ErrDrop) + w.on(path, func(v Value) Value { return InvalidValue }, ErrDrop) } func (w *walkCallTracker) track(p Path, v Value) (Value, error) { @@ -148,7 +148,7 @@ func TestWalkMapError(t *testing.T) { }) out, err := Walk(value, tracker.track) assert.Equal(t, cerr, err) - assert.Equal(t, NilValue, out) + assert.Equal(t, InvalidValue, out) // The callback should have been called twice. assert.Len(t, tracker.calls, 2) @@ -239,7 +239,7 @@ func TestWalkSequenceError(t *testing.T) { }) out, err := Walk(value, tracker.track) assert.Equal(t, cerr, err) - assert.Equal(t, NilValue, out) + assert.Equal(t, InvalidValue, out) // The callback should have been called three times. 
assert.Len(t, tracker.calls, 3) diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index 908793d58..e6a16f79e 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -55,7 +55,7 @@ func (d *loader) load(node *yaml.Node) (dyn.Value, error) { case yaml.AliasNode: value, err = d.loadAlias(node, loc) default: - return dyn.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) + return dyn.InvalidValue, errorf(loc, "unknown node kind: %v", node.Kind) } if err != nil { @@ -80,7 +80,7 @@ func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, err for i, n := range node.Content { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } acc[i] = v @@ -99,7 +99,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Assert that keys are strings if key.Kind != yaml.ScalarNode { - return dyn.NilValue, errorf(loc, "key is not a scalar") + return dyn.InvalidValue, errorf(loc, "key is not a scalar") } st := key.ShortTag() @@ -113,17 +113,17 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro merge = val continue default: - return dyn.NilValue, errorf(loc, "invalid key tag: %v", st) + return dyn.InvalidValue, errorf(loc, "invalid key tag: %v", st) } k, err := d.load(key) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } v, err := d.load(val) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } acc.Set(k, v) @@ -155,7 +155,7 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro for _, n := range mnodes { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } m, ok := v.AsMap() if !ok { @@ -186,12 +186,12 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error case "false": return dyn.NewValue(false, loc), nil default: - return dyn.NilValue, errorf(loc, "invalid bool value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid bool value: %v", node.Value) } case "!!int": i64, err := strconv.ParseInt(node.Value, 10, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid int value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid int value: %v", node.Value) } // Use regular int type instead of int64 if possible. 
if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { @@ -201,7 +201,7 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid float value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid float value: %v", node.Value) } return dyn.NewValue(f64, loc), nil case "!!null": @@ -219,9 +219,9 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error return dyn.NewValue(t, loc), nil } } - return dyn.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid timestamp value: %v", node.Value) default: - return dyn.NilValue, errorf(loc, "unknown tag: %v", st) + return dyn.InvalidValue, errorf(loc, "unknown tag: %v", st) } } diff --git a/libs/dyn/yamlloader/yaml.go b/libs/dyn/yamlloader/yaml.go index a18324ffa..b79b41e1e 100644 --- a/libs/dyn/yamlloader/yaml.go +++ b/libs/dyn/yamlloader/yaml.go @@ -15,7 +15,7 @@ func LoadYAML(path string, r io.Reader) (dyn.Value, error) { if err == io.EOF { return dyn.NilValue, nil } - return dyn.NilValue, err + return dyn.InvalidValue, err } return newLoader(path).load(&node) diff --git a/libs/dyn/yamlsaver/utils.go b/libs/dyn/yamlsaver/utils.go index 6149491d6..fa5ab08fb 100644 --- a/libs/dyn/yamlsaver/utils.go +++ b/libs/dyn/yamlsaver/utils.go @@ -15,7 +15,7 @@ func ConvertToMapValue(strct any, order *Order, skipFields []string, dst map[str ref := dyn.NilValue mv, err := convert.FromTyped(strct, ref) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } if mv.Kind() != dyn.KindMap { diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 9398958f5..48e8a05ee 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -2,6 +2,7 @@ package filer import ( "context" + "errors" "io" "io/fs" "os" @@ -35,7 +36,7 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, } f, err := os.OpenFile(absPath, flags, 0644) - if os.IsNotExist(err) && slices.Contains(mode, CreateParentDirectories) { + if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. err = os.MkdirAll(filepath.Dir(absPath), 0755) if err != nil { @@ -47,9 +48,9 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, if err != nil { switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return NoSuchDirectoryError{path: absPath} - case os.IsExist(err): + case errors.Is(err, fs.ErrExist): return FileAlreadyExistsError{path: absPath} default: return err @@ -77,7 +78,7 @@ func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, err // 2. 
Allows us to error out if the path is a directory stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return nil, err @@ -108,11 +109,11 @@ func (w *LocalClient) Delete(ctx context.Context, name string, mode ...DeleteMod return nil } - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return FileDoesNotExistError{path: absPath} } - if os.IsExist(err) { + if errors.Is(err, fs.ErrExist) { if slices.Contains(mode, DeleteRecursively) { return os.RemoveAll(absPath) } @@ -130,7 +131,7 @@ func (w *LocalClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, NoSuchDirectoryError{path: absPath} } return nil, err @@ -159,7 +160,7 @@ func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error } stat, err := os.Stat(absPath) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return stat, err diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 60e6676aa..194317578 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -35,22 +36,36 @@ func (entry wsfsDirEntry) Info() (fs.FileInfo, error) { return entry.wsfsFileInfo, nil } +func wsfsDirEntriesFromObjectInfos(objects []workspace.ObjectInfo) []fs.DirEntry { + info := make([]fs.DirEntry, len(objects)) + for i, v := range objects { + info[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: v}} + } + + // Sort by name for parity with os.ReadDir. + sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() }) + return info +} + // Type that implements fs.FileInfo for WSFS. type wsfsFileInfo struct { - oi workspace.ObjectInfo + workspace.ObjectInfo + + // The export format of a notebook. This is not exposed by the SDK. + ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` } func (info wsfsFileInfo) Name() string { - return path.Base(info.oi.Path) + return path.Base(info.ObjectInfo.Path) } func (info wsfsFileInfo) Size() int64 { - return info.oi.Size + return info.ObjectInfo.Size } func (info wsfsFileInfo) Mode() fs.FileMode { - switch info.oi.ObjectType { - case workspace.ObjectTypeDirectory: + switch info.ObjectInfo.ObjectType { + case workspace.ObjectTypeDirectory, workspace.ObjectTypeRepo: return fs.ModeDir default: return fs.ModePerm @@ -58,15 +73,29 @@ func (info wsfsFileInfo) Mode() fs.FileMode { } func (info wsfsFileInfo) ModTime() time.Time { - return time.UnixMilli(info.oi.ModifiedAt) + return time.UnixMilli(info.ObjectInfo.ModifiedAt) } func (info wsfsFileInfo) IsDir() bool { - return info.oi.ObjectType == workspace.ObjectTypeDirectory + return info.Mode() == fs.ModeDir } func (info wsfsFileInfo) Sys() any { - return info.oi + return info.ObjectInfo +} + +// UnmarshalJSON is a custom unmarshaller for the wsfsFileInfo struct. +// It must be defined for this type because otherwise the implementation +// of the embedded ObjectInfo type will be used. 
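+// In that case, fields declared on wsfsFileInfo itself, such as
+// ReposExportFormat, would never be populated.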
+func (info *wsfsFileInfo) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, info)
+}
+
+// MarshalJSON is a custom marshaller for the wsfsFileInfo struct.
+// It must be defined for this type because otherwise the implementation
+// of the embedded ObjectInfo type will be used.
+func (info *wsfsFileInfo) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(info)
+}
 
 // WorkspaceFilesClient implements the files-in-workspace API.
@@ -276,14 +305,8 @@ func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D
 		return nil, err
 	}
 
-	info := make([]fs.DirEntry, len(objects))
-	for i, v := range objects {
-		info[i] = wsfsDirEntry{wsfsFileInfo{oi: v}}
-	}
-
-	// Sort by name for parity with os.ReadDir.
-	sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() })
-	return info, nil
+	// Convert to fs.DirEntry.
+	return wsfsDirEntriesFromObjectInfos(objects), nil
 }
 
 func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error {
@@ -302,7 +325,22 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn
 		return nil, err
 	}
 
-	info, err := w.workspaceClient.Workspace.GetStatusByPath(ctx, absPath)
+	var stat wsfsFileInfo
+
+	// Perform a bespoke API call because "return_export_info" is not exposed by the SDK.
+	// We need "repos_export_format" to determine whether a notebook was exported as a
+	// .py or an .ipynb file, so we call "get-status" directly.
+	err = w.apiClient.Do(
+		ctx,
+		http.MethodGet,
+		"/api/2.0/workspace/get-status",
+		nil,
+		map[string]string{
+			"path":               absPath,
+			"return_export_info": "true",
+		},
+		&stat,
+	)
 	if err != nil {
 		// If we got an API error we deal with it below.
 		var aerr *apierr.APIError
@@ -316,5 +354,5 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn
 		}
 	}
 
-	return wsfsFileInfo{*info}, nil
+	return stat, nil
 }
diff --git a/libs/filer/workspace_files_client_test.go b/libs/filer/workspace_files_client_test.go
new file mode 100644
index 000000000..650b5be68
--- /dev/null
+++ b/libs/filer/workspace_files_client_test.go
@@ -0,0 +1,95 @@
+package filer
+
+import (
+	"encoding/json"
+	"io/fs"
+	"testing"
+	"time"
+
+	"github.com/databricks/databricks-sdk-go/service/workspace"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestWorkspaceFilesDirEntry(t *testing.T) {
+	entries := wsfsDirEntriesFromObjectInfos([]workspace.ObjectInfo{
+		{
+			Path:       "/dir",
+			ObjectType: workspace.ObjectTypeDirectory,
+		},
+		{
+			Path:       "/file",
+			ObjectType: workspace.ObjectTypeFile,
+			Size:       42,
+		},
+		{
+			Path:       "/repo",
+			ObjectType: workspace.ObjectTypeRepo,
+		},
+	})
+
+	// Confirm the path is passed through correctly.
+	assert.Equal(t, "dir", entries[0].Name())
+	assert.Equal(t, "file", entries[1].Name())
+	assert.Equal(t, "repo", entries[2].Name())
+
+	// Confirm the type is passed through correctly.
+	assert.Equal(t, fs.ModeDir, entries[0].Type())
+	assert.Equal(t, fs.ModePerm, entries[1].Type())
+	assert.Equal(t, fs.ModeDir, entries[2].Type())
+
+	// Get [fs.FileInfo] from directory entry.
+	i0, err := entries[0].Info()
+	require.NoError(t, err)
+	i1, err := entries[1].Info()
+	require.NoError(t, err)
+	i2, err := entries[2].Info()
+	require.NoError(t, err)
+
+	// Confirm size.
+	assert.Equal(t, int64(0), i0.Size())
+	assert.Equal(t, int64(42), i1.Size())
+	assert.Equal(t, int64(0), i2.Size())
+
+	// Confirm IsDir.
+ assert.True(t, i0.IsDir()) + assert.False(t, i1.IsDir()) + assert.True(t, i2.IsDir()) +} + +func TestWorkspaceFilesClient_wsfsUnmarshal(t *testing.T) { + payload := ` + { + "created_at": 1671030805916, + "language": "PYTHON", + "modified_at": 1671032235392, + "object_id": 795822750063438, + "object_type": "NOTEBOOK", + "path": "/some/path/to/a/notebook", + "repos_export_format": "SOURCE", + "resource_id": "795822750063438" + } + ` + + var info wsfsFileInfo + err := json.Unmarshal([]byte(payload), &info) + require.NoError(t, err) + + // Fields in the object info. + assert.Equal(t, int64(1671030805916), info.CreatedAt) + assert.Equal(t, workspace.LanguagePython, info.Language) + assert.Equal(t, int64(1671032235392), info.ModifiedAt) + assert.Equal(t, int64(795822750063438), info.ObjectId) + assert.Equal(t, workspace.ObjectTypeNotebook, info.ObjectType) + assert.Equal(t, "/some/path/to/a/notebook", info.Path) + assert.Equal(t, workspace.ExportFormatSource, info.ReposExportFormat) + assert.Equal(t, "795822750063438", info.ResourceId) + + // Functions for fs.FileInfo. + assert.Equal(t, "notebook", info.Name()) + assert.Equal(t, int64(0), info.Size()) + assert.Equal(t, fs.ModePerm, info.Mode()) + assert.Equal(t, time.UnixMilli(1671032235392), info.ModTime()) + assert.False(t, info.IsDir()) + assert.NotNil(t, info.Sys()) +} diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go index bad748b10..a872dcc65 100644 --- a/libs/filer/workspace_files_extensions_client.go +++ b/libs/filer/workspace_files_extensions_client.go @@ -6,22 +6,17 @@ import ( "fmt" "io" "io/fs" - "net/http" "path" "strings" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/notebook" "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" - "github.com/databricks/databricks-sdk-go/client" - "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/workspace" ) type workspaceFilesExtensionsClient struct { workspaceClient *databricks.WorkspaceClient - apiClient *client.DatabricksClient wsfs Filer root string @@ -35,64 +30,20 @@ var extensionsToLanguages = map[string]workspace.Language{ ".ipynb": workspace.LanguagePython, } -// workspaceFileStatus defines a custom response body for the "/api/2.0/workspace/get-status" API. -// The "repos_export_format" field is not exposed by the SDK. type workspaceFileStatus struct { - *workspace.ObjectInfo - - // The export format of the notebook. This is not exposed by the SDK. - ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` + wsfsFileInfo // Name of the file to be used in any API calls made using the workspace files // filer. For notebooks this path does not include the extension. nameForWorkspaceAPI string } -// A custom unmarsaller for the workspaceFileStatus struct. This is needed because -// workspaceFileStatus embeds the workspace.ObjectInfo which itself has a custom -// unmarshaller. -// If a custom unmarshaller is not provided extra fields like ReposExportFormat -// will not have values set. 
-func (s *workspaceFileStatus) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s *workspaceFileStatus) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (*workspaceFileStatus, error) { - stat := &workspaceFileStatus{ - nameForWorkspaceAPI: name, - } - - // Perform bespoke API call because "return_export_info" is not exposed by the SDK. - // We need "repos_export_format" to determine if the file is a py or a ipynb notebook. - // This is not exposed by the SDK so we need to make a direct API call. - err := w.apiClient.Do( - ctx, - http.MethodGet, - "/api/2.0/workspace/get-status", - nil, - map[string]string{ - "path": path.Join(w.root, name), - "return_export_info": "true", - }, - stat, - ) +func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (wsfsFileInfo, error) { + info, err := w.wsfs.Stat(ctx, name) if err != nil { - // If we got an API error we deal with it below. - var aerr *apierr.APIError - if !errors.As(err, &aerr) { - return nil, err - } - - // This API returns a 404 if the specified path does not exist. - if aerr.StatusCode == http.StatusNotFound { - return nil, FileDoesNotExistError{path.Join(w.root, name)} - } + return wsfsFileInfo{}, err } - return stat, err + return info.(wsfsFileInfo), err } // This function returns the stat for the provided notebook. The stat object itself contains the path @@ -146,7 +97,10 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx contex // Modify the stat object path to include the extension. This stat object will be used // to return the fs.FileInfo object in the stat method. stat.Path = stat.Path + ext - return stat, nil + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: nameWithoutExt, + }, nil } func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx context.Context, name string) (*workspaceFileStatus, error) { @@ -162,7 +116,7 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con } // Get the extension for the notebook. - ext := notebook.GetExtensionByLanguage(stat.ObjectInfo) + ext := notebook.GetExtensionByLanguage(&stat.ObjectInfo) // If the notebook was exported as a Jupyter notebook, the extension should be .ipynb. if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter { @@ -172,7 +126,10 @@ func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx con // Modify the stat object path to include the extension. This stat object will be used // to return the fs.DirEntry object in the ReadDir method. stat.Path = stat.Path + ext - return stat, nil + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: name, + }, nil } type DuplicatePathError struct { @@ -200,11 +157,6 @@ func (e DuplicatePathError) Error() string { // errors for namespace clashes (e.g. a file and a notebook or a directory and a notebook). // Thus users of these methods should be careful to avoid such clashes. 
func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root string) (Filer, error) { - apiClient, err := client.New(w.Config) - if err != nil { - return nil, err - } - filer, err := NewWorkspaceFilesClient(w, root) if err != nil { return nil, err @@ -212,7 +164,6 @@ func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root strin return &workspaceFilesExtensionsClient{ workspaceClient: w, - apiClient: apiClient, wsfs: filer, root: root, @@ -235,12 +186,12 @@ func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name strin // If the object is a notebook, include an extension in the entry. if sysInfo.ObjectType == workspace.ObjectTypeNotebook { - stat, err := w.getNotebookStatByNameWithoutExt(ctx, entries[i].Name()) + stat, err := w.getNotebookStatByNameWithoutExt(ctx, path.Join(name, entries[i].Name())) if err != nil { return nil, err } // Replace the entry with the new entry that includes the extension. - entries[i] = wsfsDirEntry{wsfsFileInfo{oi: *stat.ObjectInfo}} + entries[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: stat.ObjectInfo}} } // Error if we have seen this path before in the current directory. @@ -331,7 +282,7 @@ func (w *workspaceFilesExtensionsClient) Stat(ctx context.Context, name string) return nil, err } - return wsfsFileInfo{oi: *stat.ObjectInfo}, nil + return wsfsFileInfo{ObjectInfo: stat.ObjectInfo}, nil } return info, err diff --git a/libs/git/config.go b/libs/git/config.go index 424d453bc..fafd81bd6 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -1,8 +1,10 @@ package git import ( + "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "regexp" @@ -88,12 +90,12 @@ func (c config) load(r io.Reader) error { return nil } -func (c config) loadFile(fs vfs.Path, path string) error { - f, err := fs.Open(path) +func (c config) loadFile(root vfs.Path, path string) error { + f, err := root.Open(path) if err != nil { // If the file doesn't exist it is ignored. // This is the case for both global and repository specific config files. - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return err @@ -130,7 +132,7 @@ func (c config) coreExcludesFile() (string, error) { // If there are other problems accessing this file we would // run into them at a later point anyway. _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } diff --git a/libs/git/ignore.go b/libs/git/ignore.go index df3a4e919..9f501e472 100644 --- a/libs/git/ignore.go +++ b/libs/git/ignore.go @@ -1,8 +1,8 @@ package git import ( + "errors" "io/fs" - "os" "strings" "time" @@ -74,7 +74,7 @@ func (f *ignoreFile) load() error { // If it doesn't exist, treat it as an empty file. 
stat, err := fs.Stat(f.root, f.path)
 	if err != nil {
-		if os.IsNotExist(err) {
+		if errors.Is(err, fs.ErrNotExist) {
 			return nil
 		}
 		return err
diff --git a/libs/git/reference.go b/libs/git/reference.go
index 2b4bd3e4d..2165a9cda 100644
--- a/libs/git/reference.go
+++ b/libs/git/reference.go
@@ -1,9 +1,9 @@
 package git
 
 import (
+	"errors"
 	"fmt"
 	"io/fs"
-	"os"
 	"regexp"
 	"strings"
 
@@ -42,7 +42,7 @@ func isSHA1(s string) bool {
 func LoadReferenceFile(root vfs.Path, path string) (*Reference, error) {
 	// read reference file content
 	b, err := fs.ReadFile(root, path)
-	if os.IsNotExist(err) {
+	if errors.Is(err, fs.ErrNotExist) {
 		return nil, nil
 	}
 	if err != nil {
diff --git a/libs/git/repository.go b/libs/git/repository.go
index 6baf26c2e..6940ddac8 100644
--- a/libs/git/repository.go
+++ b/libs/git/repository.go
@@ -1,8 +1,10 @@
 package git
 
 import (
+	"errors"
 	"fmt"
-	"os"
+	"io/fs"
+	"net/url"
 	"path"
 	"path/filepath"
 	"strings"
@@ -99,7 +101,22 @@ func (r *Repository) LatestCommit() (string, error) {
 
 // return origin url if it's defined, otherwise an empty string
 func (r *Repository) OriginUrl() string {
-	return r.config.variables["remote.origin.url"]
+	rawUrl := r.config.variables["remote.origin.url"]
+
+	// Remove username and password from the URL.
+	parsedUrl, err := url.Parse(rawUrl)
+	if err != nil {
+		// Git supports https URLs and non-standard URLs like "ssh://" or "file://".
+		// Parsing these URLs is not supported by the Go standard library. In case
+		// of an error, we return the raw URL. This is okay because passwords
+		// cannot be included in ssh URLs.
+		return rawUrl
+	}
+	// Setting User to nil removes the username and password from the URL when
+	// .String() is called.
+	// See: https://pkg.go.dev/net/url#URL.String
+	parsedUrl.User = nil
+	return parsedUrl.String()
 }
 
 // loadConfig loads and combines user specific and repository specific configuration files.
@@ -190,7 +207,7 @@ func NewRepository(path vfs.Path) (*Repository, error) {
 	real := true
 	rootPath, err := vfs.FindLeafInTree(path, GitDirectoryName)
 	if err != nil {
-		if !os.IsNotExist(err) {
+		if !errors.Is(err, fs.ErrNotExist) {
 			return nil, err
 		}
 		// Cannot find `.git` directory.
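The OriginUrl change above relies entirely on the standard library: parsing the remote URL with net/url and clearing the User field is enough to drop any embedded credentials when the URL is re-serialized. Below is a minimal, self-contained sketch of the same technique; scrubCredentials is a hypothetical helper name for illustration and is not part of this diff.

package main

import (
	"fmt"
	"net/url"
)

func scrubCredentials(rawUrl string) string {
	parsedUrl, err := url.Parse(rawUrl)
	if err != nil {
		// Mirror the fallback in the diff: return the raw value when it
		// cannot be parsed as a URL.
		return rawUrl
	}
	// Clearing the User field drops any "username:password@" component
	// from the output of String().
	parsedUrl.User = nil
	return parsedUrl.String()
}

func main() {
	// Matches the expectation in TestRepositoryOriginUrlRemovesUserCreds below.
	fmt.Println(scrubCredentials("https://username:token@github.com/databricks/foobar.git"))
	// Output: https://github.com/databricks/foobar.git
}

Note that scp-style remotes such as git@host:path are not valid URLs, so url.Parse typically fails on them; that is the case the raw-value fallback in the diff handles.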
diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index 7ddc7ea79..a28038eeb 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -207,3 +207,9 @@ func TestRepositoryGitConfigWhenNotARepo(t *testing.T) { originUrl := repo.OriginUrl() assert.Equal(t, "", originUrl) } + +func TestRepositoryOriginUrlRemovesUserCreds(t *testing.T) { + repo := newTestRepository(t) + repo.addOriginUrl("https://username:token@github.com/databricks/foobar.git") + repo.assertOriginUrl("https://github.com/databricks/foobar.git") +} diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 5d3aa8a81..fd3337579 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -1,6 +1,8 @@ package notebook import ( + "errors" + "io/fs" "os" "path/filepath" "testing" @@ -50,7 +52,7 @@ func TestDetectCallsDetectJupyter(t *testing.T) { func TestDetectUnknownExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist.foobar") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/unknown_extension.foobar") require.NoError(t, err) @@ -59,7 +61,7 @@ func TestDetectUnknownExtension(t *testing.T) { func TestDetectNoExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/no_extension") require.NoError(t, err) diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 2ee6727a0..2e47e814b 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -5,7 +5,6 @@ import ( "bytes" "context" "fmt" - "os" "os/exec" "strings" "testing" @@ -26,8 +25,8 @@ func splitLines(b []byte) (lines []string) { func TestBackgroundUnwrapsNotFound(t *testing.T) { ctx := context.Background() - _, err := Background(ctx, []string{"/bin/meeecho", "1"}) - assert.ErrorIs(t, err, os.ErrNotExist) + _, err := Background(ctx, []string{"meeecho", "1"}) + assert.ErrorIs(t, err, exec.ErrNotFound) } func TestBackground(t *testing.T) { diff --git a/libs/process/opts.go b/libs/process/opts.go index e201c6668..9516e49ba 100644 --- a/libs/process/opts.go +++ b/libs/process/opts.go @@ -48,6 +48,27 @@ func WithStdoutPipe(dst *io.ReadCloser) execOption { } } +func WithStdinReader(src io.Reader) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdin = src + return nil + } +} + +func WithStderrWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stderr = dst + return nil + } +} + +func WithStdoutWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdout = dst + return nil + } +} + func WithCombinedOutput(buf *bytes.Buffer) execOption { return func(_ context.Context, c *exec.Cmd) error { c.Stdout = io.MultiWriter(buf, c.Stdout) diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 392e274d4..f2920d8c2 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -3,7 +3,9 @@ package sync import ( "context" "encoding/json" + "errors" "fmt" + "io/fs" "os" "path/filepath" "time" @@ -33,7 +35,7 @@ const LatestSnapshotVersion = "v1" type Snapshot struct { // Path where this snapshot was loaded from and will be saved to. // Intentionally not part of the snapshot state because it may be moved by the user. 
- SnapshotPath string `json:"-"` + snapshotPath string // New indicates if this is a fresh snapshot or if it was loaded from disk. New bool `json:"-"` @@ -68,7 +70,7 @@ func NewSnapshot(localFiles []fileset.File, opts *SyncOptions) (*Snapshot, error snapshotState.ResetLastModifiedTimes() return &Snapshot{ - SnapshotPath: snapshotPath, + snapshotPath: snapshotPath, New: true, Version: LatestSnapshotVersion, Host: opts.Host, @@ -88,7 +90,7 @@ func GetFileName(host, remotePath string) string { // precisely it's the first 16 characters of md5(concat(host, remotePath)) func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) - if _, err := os.Stat(snapshotDir); os.IsNotExist(err) { + if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { err = os.MkdirAll(snapshotDir, 0755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) @@ -105,7 +107,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } return &Snapshot{ - SnapshotPath: path, + snapshotPath: path, New: true, Version: LatestSnapshotVersion, @@ -120,7 +122,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.SnapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } @@ -145,11 +147,11 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error } // Snapshot file not found. We return the new copy. - if _, err := os.Stat(snapshot.SnapshotPath); os.IsNotExist(err) { + if _, err := os.Stat(snapshot.snapshotPath); errors.Is(err, fs.ErrNotExist) { return snapshot, nil } - bytes, err := os.ReadFile(snapshot.SnapshotPath) + bytes, err := os.ReadFile(snapshot.snapshotPath) if err != nil { return nil, fmt.Errorf("failed to read sync snapshot from disk: %s", err) } @@ -189,7 +191,7 @@ func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (diff, error) { currentState := s.SnapshotState if err := currentState.validate(); err != nil { - return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.SnapshotPath, err) + return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.snapshotPath, err) } // Compute diff to apply to get from current state to new target state. diff --git a/libs/sync/sync.go b/libs/sync/sync.go index 585e8a887..3d5bc61ec 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -152,36 +152,41 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { s.seq++ } -func (s *Sync) RunOnce(ctx context.Context) error { +// Upload all files in the file tree rooted at the local path configured in the +// SyncOptions to the remote path configured in the SyncOptions. +// +// Returns the list of files tracked (and synchronized) by the syncer during the run, +// and an error if any occurred. 
+func (s *Sync) RunOnce(ctx context.Context) ([]fileset.File, error) { files, err := s.GetFileList(ctx) if err != nil { - return err + return files, err } change, err := s.snapshot.diff(ctx, files) if err != nil { - return err + return files, err } s.notifyStart(ctx, change) if change.IsEmpty() { s.notifyComplete(ctx, change) - return nil + return files, nil } err = s.applyDiff(ctx, change) if err != nil { - return err + return files, err } err = s.snapshot.Save(ctx) if err != nil { log.Errorf(ctx, "cannot store snapshot: %s", err) - return err + return files, err } s.notifyComplete(ctx, change) - return nil + return files, nil } func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { @@ -218,10 +223,6 @@ func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { return all.Iter(), nil } -func (s *Sync) SnapshotPath() string { - return s.snapshot.SnapshotPath -} - func (s *Sync) RunContinuous(ctx context.Context) error { ticker := time.NewTicker(s.PollInterval) defer ticker.Stop() @@ -231,7 +232,7 @@ func (s *Sync) RunContinuous(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-ticker.C: - err := s.RunOnce(ctx) + _, err := s.RunOnce(ctx) if err != nil { return err } diff --git a/libs/template/helpers.go b/libs/template/helpers.go index d15a801d6..b3dea329e 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/rand" "net/url" "os" "regexp" @@ -46,6 +47,10 @@ func loadHelpers(ctx context.Context) template.FuncMap { "regexp": func(expr string) (*regexp.Regexp, error) { return regexp.Compile(expr) }, + // Alias for https://pkg.go.dev/math/rand#Intn. Returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). + "random_int": func(n int) int { + return rand.Intn(n) + }, // A key value pair. 
This is used with the map function to generate maps
 		// to use inside a template
 		"pair": func(k string, v any) pair {
diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go
index a07b26f81..c0848c8d0 100644
--- a/libs/template/helpers_test.go
+++ b/libs/template/helpers_test.go
@@ -3,6 +3,7 @@ package template
 import (
 	"context"
 	"os"
+	"strconv"
 	"strings"
 	"testing"
 
@@ -50,6 +51,24 @@ func TestTemplateRegexpCompileFunction(t *testing.T) {
 	assert.Contains(t, content, "1:fool")
 }
 
+func TestTemplateRandIntFunction(t *testing.T) {
+	ctx := context.Background()
+	tmpDir := t.TempDir()
+
+	ctx = root.SetWorkspaceClient(ctx, nil)
+	helpers := loadHelpers(ctx)
+	r, err := newRenderer(ctx, nil, helpers, "./testdata/random-int/template", "./testdata/random-int/library", tmpDir)
+	require.NoError(t, err)
+
+	err = r.walk()
+	assert.NoError(t, err)
+
+	assert.Len(t, r.files, 1)
+	randInt, err := strconv.Atoi(strings.TrimSpace(string(r.files[0].(*inMemoryFile).content)))
+	require.NoError(t, err)
+	assert.Less(t, randInt, 10)
+}
+
 func TestTemplateUrlFunction(t *testing.T) {
 	ctx := context.Background()
 	tmpDir := t.TempDir()
diff --git a/libs/template/materialize.go b/libs/template/materialize.go
index 811ef9259..d824bf381 100644
--- a/libs/template/materialize.go
+++ b/libs/template/materialize.go
@@ -3,6 +3,7 @@ package template
 import (
 	"context"
 	"embed"
+	"errors"
 	"fmt"
 	"io/fs"
 	"os"
@@ -44,7 +45,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 	schemaPath := filepath.Join(templateRoot, schemaFileName)
 	helpers := loadHelpers(ctx)
 
-	if _, err := os.Stat(schemaPath); os.IsNotExist(err) {
+	if _, err := os.Stat(schemaPath); errors.Is(err, fs.ErrNotExist) {
 		return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaPath)
 	}
 
@@ -53,12 +54,6 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 		return err
 	}
 
-	// Print welcome message
-	welcome := config.schema.WelcomeMessage
-	if welcome != "" {
-		cmdio.LogString(ctx, welcome)
-	}
-
 	// Read and assign config values from file
 	if configFilePath != "" {
 		err = config.assignValuesFromFile(configFilePath)
@@ -72,6 +67,16 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st
 		return err
 	}
 
+	// Print welcome message
+	welcome := config.schema.WelcomeMessage
+	if welcome != "" {
+		welcome, err = r.executeTemplate(welcome)
+		if err != nil {
+			return err
+		}
+		cmdio.LogString(ctx, welcome)
+	}
+
 	// Prompt user for any missing config values.
Assign default values if // terminal is not TTY err = config.promptOrAssignDefaultValues(r) diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 6415cd84a..827f30133 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path" "path/filepath" @@ -313,7 +314,7 @@ func (r *renderer) persistToDisk() error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json index 7b39f6187..cccf145dc 100644 --- a/libs/template/templates/dbt-sql/databricks_template_schema.json +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the (EXPERIMENTAL) dbt template for Databricks Asset Bundles!", + "welcome_message": "\nWelcome to the dbt template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", @@ -13,7 +13,7 @@ "type": "string", "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", - "description": " \nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", "order": 2 }, "default_catalog": { diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl index d46b61f72..dbf8a8d85 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl @@ -94,7 +94,7 @@ target-specific settings. Read more about dbt profiles on Databricks at https://docs.databricks.com/en/workflows/jobs/how-to/use-dbt-in-workflows.html#advanced-run-dbt-with-a-custom-profile. The target workspaces for staging and prod are defined in databricks.yml. -You can manaully deploy based on these configurations (see below). +You can manually deploy based on these configurations (see below). Or you can use CI/CD to automate deployment. See https://docs.databricks.com/dev-tools/bundles/ci-cd.html for documentation on CI/CD setup. 
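The random_int helper added to loadHelpers above is a thin alias for rand.Intn wired into Go's text/template function map, and the welcome message is now rendered through the same template engine, which is why the schema files in the hunks below can interpolate values like {{workspace_host}}. A standalone sketch of that mechanism, assuming nothing beyond the standard library (everything outside the helper itself is illustrative):

package main

import (
	"math/rand"
	"os"
	"text/template"
)

func main() {
	funcs := template.FuncMap{
		// Same shape as the helper in libs/template/helpers.go: a
		// non-negative pseudo-random int in the half-open interval [0, n).
		"random_int": func(n int) int { return rand.Intn(n) },
	}

	// The same expression as libs/template/testdata/random-int/template/hello.tmpl.
	tmpl := template.Must(template.New("hello").Funcs(funcs).Parse("{{print (random_int 10)}}\n"))

	// Prints a single integer between 0 and 9.
	if err := tmpl.Execute(os.Stdout, nil); err != nil {
		panic(err)
	}
}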
diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl index d29bd55ce..e96931e2d 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl @@ -3,26 +3,35 @@ {{- $catalog = "\"\" # workspace default"}} {{- end}} # This file defines dbt profiles for deployed dbt jobs. -# Note that for local development you should create your own, local profile. -# (see README.md). -my_dbt_project: +{{.project_name}}: target: dev # default target outputs: - dev: + # Doing local development with the dbt CLI? + # Then you should create your own profile in your .dbt/profiles.yml using 'dbt init' + # (See README.md) + + # The default target when deployed with the Databricks CLI + # N.B. when you use dbt from the command line, it uses the profile from .dbt/profiles.yml + dev: type: databricks method: http catalog: {{$catalog}} +{{- if (regexp "^yes").MatchString .personal_schemas}} schema: "{{"{{"}} var('dev_schema') {{"}}"}}" +{{- else}} + schema: "{{.shared_schema}}" +{{- end}} http_path: {{.http_path}} # The workspace host / token are provided by Databricks - # see databricks.yml for the host used for 'dev' + # see databricks.yml for the workspace host used for 'dev' host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" - prod: + # The production target when deployed with the Databricks CLI + prod: type: databricks method: http catalog: {{$catalog}} @@ -31,6 +40,6 @@ my_dbt_project: http_path: {{.http_path}} # The workspace host / token are provided by Databricks - # see databricks.yml for the host used for 'dev' + # see databricks.yml for the workspace host used for 'prod' host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt index 10d7b9f10..e6b861203 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt @@ -1,3 +1,3 @@ ## requirements-dev.txt: dependencies for local development. -dbt-databricks>=1.0.0,<2.0.0 +dbt-databricks>=1.8.0,<2.0.0 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl index 688c23b92..bad12c755 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -12,10 +12,6 @@ resources: on_failure: - {{user_name}} -{{- $dev_schema := .shared_schema }} -{{- if (regexp "^yes").MatchString .personal_schemas}} -{{- $dev_schema = "${workspace.current_user.short_name}"}} -{{- end}} tasks: - task_key: dbt @@ -25,13 +21,21 @@ resources: # The default schema, catalog, etc. 
are defined in ../dbt_profiles/profiles.yml profiles_directory: dbt_profiles/ commands: +{{- if (regexp "^yes").MatchString .personal_schemas}} + # The dbt commands to run (see also dbt_profiles/profiles.yml; dev_schema is used in the dev profile) - 'dbt deps --target=${bundle.target}' - - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' - - 'dbt run --target=${bundle.target} --vars "{ dev_schema: {{$dev_schema}} }"' + - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' + - 'dbt run --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' +{{- else}} + # The dbt commands to run (see also the dev/prod profiles in dbt_profiles/profiles.yml) + - 'dbt deps --target=${bundle.target}' + - 'dbt seed --target=${bundle.target}' + - 'dbt run --target=${bundle.target}' +{{- end}} libraries: - pypi: - package: dbt-databricks>=1.0.0,<2.0.0 + package: dbt-databricks>=1.8.0,<2.0.0 new_cluster: spark_version: {{template "latest_lts_dbr_version"}} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml index d34b9e645..c64f1bfce 100644 --- a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml @@ -7,7 +7,7 @@ models: columns: - name: customer_name description: "The name of a customer" - tests: + data_tests: - unique - not_null @@ -16,6 +16,6 @@ models: columns: - name: order_date description: "The date on which orders took place" - tests: + data_tests: - unique - not_null diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json index b7a42e198..329f91962 100644 --- a/libs/template/templates/default-sql/databricks_template_schema.json +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -1,5 +1,5 @@ { - "welcome_message": "\nWelcome to the (EXPERIMENTAL) default SQL template for Databricks Asset Bundles!", + "welcome_message": "\nWelcome to the default SQL template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. 
For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", "properties": { "project_name": { "type": "string", diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl index 76ecadd3e..8a9d12ea8 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -1,14 +1,22 @@ -- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) -{{- /* We can't use a materialized view here since they don't support 'create or refresh yet.*/}} +{{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}} + +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); CREATE OR REPLACE VIEW - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_daily')) + orders_daily AS SELECT order_date, count(*) AS number_of_orders FROM - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) + orders_raw --- During development, only process a smaller range of data -WHERE {{"{{"}}bundle_target{{"}}"}} == "prod" OR (order_date >= '2019-08-01' AND order_date < '2019-09-01') +WHERE if( + {{"{{"}}bundle_target{{"}}"}} = "prod", + true, + + -- During development, only process a smaller range of data + order_date >= '2019-08-01' AND order_date < '2019-09-01' +) GROUP BY order_date diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl index 96769062b..c73606ef1 100644 --- a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -3,8 +3,11 @@ -- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ -- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); + CREATE OR REFRESH STREAMING TABLE - IDENTIFIER(CONCAT({{"{{"}}catalog{{"}}"}}, '.', {{"{{"}}schema{{"}}"}}, '.', 'orders_raw')) + orders_raw AS SELECT customer_name, DATE(TIMESTAMP(FROM_UNIXTIME(TRY_CAST(order_datetime AS BIGINT)))) AS order_date, diff --git a/libs/template/testdata/random-int/template/hello.tmpl b/libs/template/testdata/random-int/template/hello.tmpl new file mode 100644 index 000000000..46dc63fb6 --- /dev/null +++ b/libs/template/testdata/random-int/template/hello.tmpl @@ -0,0 +1 @@ +{{print (random_int 10)}} diff --git a/libs/vfs/filer.go b/libs/vfs/filer.go new file mode 100644 index 000000000..54f672e06 --- /dev/null +++ b/libs/vfs/filer.go @@ -0,0 +1,66 @@ +package vfs + +import ( + "context" + "io/fs" + "path/filepath" + + "github.com/databricks/cli/libs/filer" +) + +type filerPath struct { + ctx context.Context + path string + fs FS + + construct func(path string) (filer.Filer, error) +} + +func NewFilerPath(ctx context.Context, path string, construct func(path string) (filer.Filer, error)) (Path, error) { + f, err := construct(path) + if err != nil { + return nil, err + } + + return &filerPath{ + ctx: ctx, + path: 
path, + fs: filer.NewFS(ctx, f).(FS), + + construct: construct, + }, nil +} + +func (f filerPath) Open(name string) (fs.File, error) { + return f.fs.Open(name) +} + +func (f filerPath) Stat(name string) (fs.FileInfo, error) { + return f.fs.Stat(name) +} + +func (f filerPath) ReadDir(name string) ([]fs.DirEntry, error) { + return f.fs.ReadDir(name) +} + +func (f filerPath) ReadFile(name string) ([]byte, error) { + return f.fs.ReadFile(name) +} + +func (f filerPath) Parent() Path { + if f.path == "/" { + return nil + } + + dir := filepath.Dir(f.path) + nf, err := NewFilerPath(f.ctx, dir, f.construct) + if err != nil { + panic(err) + } + + return nf +} + +func (f filerPath) Native() string { + return f.path +} diff --git a/libs/vfs/filer_test.go b/libs/vfs/filer_test.go new file mode 100644 index 000000000..ee1397521 --- /dev/null +++ b/libs/vfs/filer_test.go @@ -0,0 +1,79 @@ +package vfs + +import ( + "context" + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFilerPath(t *testing.T) { + ctx := context.Background() + wd, err := os.Getwd() + require.NoError(t, err) + + // Create a new filer-backed path. + p, err := NewFilerPath(ctx, filepath.FromSlash(wd), filer.NewLocalClient) + require.NoError(t, err) + + // Open self. + f, err := p.Open("filer_test.go") + require.NoError(t, err) + defer f.Close() + + // Run stat on self. + s, err := f.Stat() + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Read some bytes. + buf := make([]byte, 1024) + _, err = f.Read(buf) + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // Open non-existent file. + _, err = p.Open("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Stat self. + s, err = p.Stat("filer_test.go") + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Stat non-existent file. + _, err = p.Stat("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadDir self. + entries, err := p.ReadDir(".") + require.NoError(t, err) + assert.GreaterOrEqual(t, len(entries), 1) + + // ReadDir non-existent directory. + _, err = p.ReadDir("doesntexist") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadFile self. + buf, err = p.ReadFile("filer_test.go") + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // ReadFile non-existent file. + _, err = p.ReadFile("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Parent self. + pp := p.Parent() + require.NotNil(t, pp) + assert.Equal(t, filepath.Join(pp.Native(), "vfs"), p.Native()) +}
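The new filer-backed vfs.Path composes with any filer constructor: Parent() re-invokes the constructor on the parent directory and returns nil once the path reaches the root, so callers can walk upward the way the Git machinery searches for a .git directory. A hypothetical usage sketch follows; the starting path is illustrative and Unix-style, and only NewFilerPath, Parent, and Native come from this diff.

package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/cli/libs/vfs"
)

func main() {
	ctx := context.Background()

	// Wrap the local filesystem in a filer-backed path.
	p, err := vfs.NewFilerPath(ctx, "/usr/local/bin", filer.NewLocalClient)
	if err != nil {
		panic(err)
	}

	// Walk up to the root. Each Parent() call constructs a fresh filer for
	// the parent directory (and, per the diff, panics if that fails);
	// Parent() returns nil at "/" and the loop ends.
	for q := p; q != nil; q = q.Parent() {
		fmt.Println(q.Native())
	}
	// Prints: /usr/local/bin, /usr/local, /usr, /
}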