diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 56c8253ff..c4b47ca14 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -a7a9dc025bb80303e676bf3708942c6aa06689f1 \ No newline at end of file +7437dabb9dadee402c1fc060df4c1ce8cc5369f0 \ No newline at end of file diff --git a/.codegen/cmds-account.go.tmpl b/.codegen/cmds-account.go.tmpl index f3da7e2c8..43834b698 100644 --- a/.codegen/cmds-account.go.tmpl +++ b/.codegen/cmds-account.go.tmpl @@ -7,7 +7,7 @@ package account import ( "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" - {{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}} ) @@ -17,7 +17,7 @@ func New() *cobra.Command { Short: `Databricks Account Commands`, } - {{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and .IsAccounts (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} cmd.AddCommand({{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/cmds-workspace.go.tmpl b/.codegen/cmds-workspace.go.tmpl index a9daa05d8..e29f05a55 100644 --- a/.codegen/cmds-workspace.go.tmpl +++ b/.codegen/cmds-workspace.go.tmpl @@ -14,14 +14,14 @@ package workspace import ( "github.com/databricks/cli/cmd/root" - {{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) }} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) }} {{.SnakeName}} "github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}} ) func All() []*cobra.Command { var out []*cobra.Command - {{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}} + {{range .Services}}{{if and (not .IsAccounts) (not .HasParent) (not .IsDataPlane)}}{{if not (in $excludes .KebabName) -}} out = append(out, {{.SnakeName}}.New()) {{end}}{{end}}{{end}} diff --git a/.codegen/lookup.go.tmpl b/.codegen/lookup.go.tmpl index a982f151a..7e643a90c 100644 --- a/.codegen/lookup.go.tmpl +++ b/.codegen/lookup.go.tmpl @@ -18,6 +18,11 @@ package variable "warehouses" }} +{{ $customField := + dict + "service-principals" "ApplicationId" +}} + import ( "context" "fmt" @@ -116,15 +121,10 @@ func allResolvers() *resolvers { return "", err } - return fmt.Sprint(entity{{ template "field-path" .List.NamedIdMap.IdPath }}), nil + return fmt.Sprint(entity.{{ getOrDefault $customField .KebabName ((index .List.NamedIdMap.IdPath 0).PascalName) }}), nil } {{end -}} {{- end}} return r } - - -{{- define "field-path" -}} - {{- range .}}.{{.PascalName}}{{end}} -{{- end -}} diff --git a/.codegen/service.go.tmpl b/.codegen/service.go.tmpl index a0cd02198..111745e4f 100644 --- a/.codegen/service.go.tmpl +++ b/.codegen/service.go.tmpl @@ -8,6 +8,10 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}" "github.com/spf13/cobra" + + {{range .Subservices -}} + {{.SnakeName}} "github.com/databricks/cli/cmd/{{ if .ParentService.IsAccounts }}account{{ else }}workspace{{ end }}/{{.KebabName}}" + {{end}} ) {{ $excludes := @@ -18,6 +22,7 @@ import ( "dbsql-permissions" "account-access-control-proxy" "files" + "serving-endpoints-data-plane" }} {{if not (in $excludes .KebabName) }} @@ -34,6 +39,9 @@ import ( ]{{end}}{{end}} {{define 
"service"}} +{{- $excludeMethods := list "put-secret" -}} +{{- $hideService := .IsPrivatePreview }} + // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var cmdOverrides []func(*cobra.Command) @@ -45,17 +53,36 @@ func New() *cobra.Command { Short: `{{.Summary | without "`"}}`, Long: `{{.Comment " " 80 | without "`"}}`, {{- end }} + {{- if not .HasParent }} GroupID: "{{ .Package.Name }}", Annotations: map[string]string{ "package": "{{ .Package.Name }}", }, - {{- if .IsPrivatePreview }} + {{- end }} + {{- if $hideService }} // This service is being previewed; hide from help output. Hidden: true, {{- end }} } + {{ if gt (len .Methods) 0 -}} + // Add methods + {{- range .Methods}} + {{- if in $excludeMethods .KebabName }} + {{- continue}} + {{- end}} + cmd.AddCommand(new{{.PascalName}}()) + {{- end}} + {{- end}} + + {{ if .HasSubservices }} + // Add subservices + {{- range .Subservices}} + cmd.AddCommand({{.SnakeName}}.New()) + {{- end}} + {{- end}} + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -67,8 +94,7 @@ func New() *cobra.Command { {{- $serviceName := .KebabName -}} {{range .Methods}} -{{- $excludes := list "put-secret" -}} -{{if in $excludes .KebabName }} +{{if in $excludeMethods .KebabName }} {{continue}} {{end}} // start {{.KebabName}} command @@ -121,7 +147,14 @@ func new{{.PascalName}}() *cobra.Command { {{- end}} {{end}} - {{- $excludeFromPrompts := list "workspace get-status" -}} + {{- $excludeFromPrompts := list + "workspace get-status" + "provider-exchanges get" + "provider-exchanges delete" + "provider-exchanges delete-listing-from-exchange" + "provider-exchanges list-exchanges-for-listing" + "provider-exchanges list-listings-for-exchange" + -}} {{- $fullCommandName := (print $serviceName " " .KebabName) -}} {{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }} @@ -159,7 +192,8 @@ func new{{.PascalName}}() *cobra.Command { {{- end -}} ` {{- end }} - {{- if .IsPrivatePreview }} + {{/* Don't hide commands if the service itself is already hidden. */}} + {{- if and (not $hideService) .IsPrivatePreview }} // This command is being previewed; hide from help output. cmd.Hidden = true @@ -170,7 +204,7 @@ func new{{.PascalName}}() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { {{- if $hasDifferentArgsWithJsonFlag }} if cmd.Flags().Changed("json") { - err := cobra.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) + err := root.ExactArgs({{len .Request.RequiredPathFields}})(cmd, args) if err != nil { {{- if eq 0 (len .Request.RequiredPathFields) }} return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide{{- range $index, $field := .Request.RequiredFields}}{{if $index}},{{end}} '{{$field.Name}}'{{end}} in your JSON input") @@ -182,7 +216,7 @@ func new{{.PascalName}}() *cobra.Command { } {{- end }} {{- if $hasRequiredArgs }} - check := cobra.ExactArgs({{len .RequiredPositionalArguments}}) + check := root.ExactArgs({{len .RequiredPositionalArguments}}) return check(cmd, args) {{- else}} return nil @@ -242,7 +276,7 @@ func new{{.PascalName}}() *cobra.Command { return err } if {{.CamelName}}SkipWait { - {{if .Response -}} + {{if not .Response.IsEmpty -}} return cmdio.Render(ctx, wait.Response) {{- else -}} return nil @@ -291,25 +325,34 @@ func new{{.PascalName}}() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(new{{.PascalName}}()) - }) -} {{end}} // end service {{.Name}}{{end}} {{- define "method-call" -}} - {{if .Response}}response, err :={{else}}err ={{end}} {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.PascalName}}{{if .Pagination}}All{{end}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) + {{if not .Response.IsEmpty -}} + response{{ if not .Pagination}}, err{{end}} := + {{- else -}} + err = + {{- end}} + {{- if .Service.IsAccounts}}a{{else}}w{{end}}. + {{- if .Service.HasParent }} + {{- (.Service.ParentService.TrimPrefix "account").PascalName }}. + {{- (.Service.TrimPrefix "account").PascalName}}(). + {{- else}} + {{- (.Service.TrimPrefix "account").PascalName}}. + {{- end}} + {{- .PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}}) + {{- if not (and .Response .Pagination) }} if err != nil { return err } - {{ if .Response -}} + {{- end}} + {{ if not .Response.IsEmpty -}} {{- if .IsResponseByteStream -}} defer response.{{.ResponseBodyField.PascalName}}.Close() - return cmdio.RenderReader(ctx, response.{{.ResponseBodyField.PascalName}}) + return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response.{{.ResponseBodyField.PascalName}}) {{- else -}} - return cmdio.Render(ctx, response) + return cmdio.Render{{ if .Pagination}}Iterator{{end}}(ctx, response) {{- end -}} {{ else -}} return nil diff --git a/.gitattributes b/.gitattributes index 7a1750caa..c11257e9e 100755 --- a/.gitattributes +++ b/.gitattributes @@ -4,8 +4,10 @@ cmd/account/billable-usage/billable-usage.go linguist-generated=true cmd/account/budgets/budgets.go linguist-generated=true cmd/account/cmd.go linguist-generated=true cmd/account/credentials/credentials.go linguist-generated=true +cmd/account/csp-enablement-account/csp-enablement-account.go linguist-generated=true cmd/account/custom-app-integration/custom-app-integration.go linguist-generated=true cmd/account/encryption-keys/encryption-keys.go linguist-generated=true +cmd/account/esm-enablement-account/esm-enablement-account.go linguist-generated=true cmd/account/groups/groups.go linguist-generated=true cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true cmd/account/log-delivery/log-delivery.go linguist-generated=true @@ -14,6 +16,7 @@ cmd/account/metastores/metastores.go linguist-generated=true cmd/account/network-connectivity/network-connectivity.go linguist-generated=true cmd/account/networks/networks.go linguist-generated=true cmd/account/o-auth-published-apps/o-auth-published-apps.go linguist-generated=true +cmd/account/personal-compute/personal-compute.go linguist-generated=true cmd/account/private-access/private-access.go linguist-generated=true 
cmd/account/published-app-integration/published-app-integration.go linguist-generated=true cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true @@ -28,17 +31,26 @@ cmd/account/workspaces/workspaces.go linguist-generated=true cmd/workspace/alerts/alerts.go linguist-generated=true cmd/workspace/apps/apps.go linguist-generated=true cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true +cmd/workspace/automatic-cluster-update/automatic-cluster-update.go linguist-generated=true cmd/workspace/catalogs/catalogs.go linguist-generated=true cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true cmd/workspace/clusters/clusters.go linguist-generated=true cmd/workspace/cmd.go linguist-generated=true +cmd/workspace/compliance-security-profile/compliance-security-profile.go linguist-generated=true cmd/workspace/connections/connections.go linguist-generated=true +cmd/workspace/consumer-fulfillments/consumer-fulfillments.go linguist-generated=true +cmd/workspace/consumer-installations/consumer-installations.go linguist-generated=true +cmd/workspace/consumer-listings/consumer-listings.go linguist-generated=true +cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go linguist-generated=true +cmd/workspace/consumer-providers/consumer-providers.go linguist-generated=true cmd/workspace/credentials-manager/credentials-manager.go linguist-generated=true cmd/workspace/current-user/current-user.go linguist-generated=true cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true cmd/workspace/dashboards/dashboards.go linguist-generated=true cmd/workspace/data-sources/data-sources.go linguist-generated=true +cmd/workspace/default-namespace/default-namespace.go linguist-generated=true +cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go linguist-generated=true cmd/workspace/experiments/experiments.go linguist-generated=true cmd/workspace/external-locations/external-locations.go linguist-generated=true cmd/workspace/functions/functions.go linguist-generated=true @@ -55,10 +67,20 @@ cmd/workspace/libraries/libraries.go linguist-generated=true cmd/workspace/metastores/metastores.go linguist-generated=true cmd/workspace/model-registry/model-registry.go linguist-generated=true cmd/workspace/model-versions/model-versions.go linguist-generated=true +cmd/workspace/online-tables/online-tables.go linguist-generated=true +cmd/workspace/permission-migration/permission-migration.go linguist-generated=true cmd/workspace/permissions/permissions.go linguist-generated=true cmd/workspace/pipelines/pipelines.go linguist-generated=true cmd/workspace/policy-families/policy-families.go linguist-generated=true +cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true +cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true +cmd/workspace/provider-files/provider-files.go linguist-generated=true +cmd/workspace/provider-listings/provider-listings.go linguist-generated=true +cmd/workspace/provider-personalization-requests/provider-personalization-requests.go linguist-generated=true +cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go linguist-generated=true +cmd/workspace/provider-providers/provider-providers.go linguist-generated=true cmd/workspace/providers/providers.go linguist-generated=true 
+cmd/workspace/quality-monitors/quality-monitors.go linguist-generated=true cmd/workspace/queries/queries.go linguist-generated=true cmd/workspace/query-history/query-history.go linguist-generated=true cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true @@ -66,6 +88,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=tr cmd/workspace/recipients/recipients.go linguist-generated=true cmd/workspace/registered-models/registered-models.go linguist-generated=true cmd/workspace/repos/repos.go linguist-generated=true +cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true cmd/workspace/schemas/schemas.go linguist-generated=true cmd/workspace/secrets/secrets.go linguist-generated=true cmd/workspace/service-principals/service-principals.go linguist-generated=true diff --git a/.github/workflows/publish-winget.yml b/.github/workflows/publish-winget.yml new file mode 100644 index 000000000..19603e669 --- /dev/null +++ b/.github/workflows/publish-winget.yml @@ -0,0 +1,16 @@ +name: publish-winget + +on: + workflow_dispatch: + +jobs: + publish-to-winget-pkgs: + runs-on: windows-latest + environment: release + steps: + - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2 + with: + identifier: Databricks.DatabricksCLI + installers-regex: 'windows_.*-signed\.zip$' # Only signed Windows releases + token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} + fork-user: eng-dev-ecosystem-bot diff --git a/.github/workflows/push.yml b/.github/workflows/push.yml index 26f85982f..08edfb9da 100644 --- a/.github/workflows/push.yml +++ b/.github/workflows/push.yml @@ -33,10 +33,10 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x - name: Setup Python - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.9' @@ -56,7 +56,7 @@ jobs: run: make test - name: Publish test coverage - uses: codecov/codecov-action@v3 + uses: codecov/codecov-action@v4 fmt: runs-on: ubuntu-latest @@ -68,7 +68,7 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x # No need to download cached dependencies when running gofmt. cache: false @@ -89,3 +89,29 @@ jobs: run: | # Exit with status code 1 if there are differences (i.e. unformatted files) git diff --exit-code + + validate-bundle-schema: + runs-on: ubuntu-latest + + steps: + - name: Checkout + uses: actions/checkout@v4 + + - name: Setup Go + uses: actions/setup-go@v5 + with: + go-version: 1.22.x + + # Github repo: https://github.com/ajv-validator/ajv-cli + - name: Install ajv-cli + run: npm install -g ajv-cli@5.0.0 + + # Assert that the generated bundle schema is a valid JSON schema by using + # ajv-cli to validate it against a sample configuration file. + # By default the ajv-cli runs in strict mode which will fail if the schema + # itself is not valid. Strict mode is more strict than the JSON schema + # specification. 
See for details: https://ajv.js.org/options.html#strict-mode-options + - name: Validate bundle schema + run: | + go run main.go bundle schema > schema.json + ajv -s schema.json -d ./bundle/tests/basic/databricks.yml diff --git a/.github/workflows/release-snapshot.yml b/.github/workflows/release-snapshot.yml index d092a6693..defd1c535 100644 --- a/.github/workflows/release-snapshot.yml +++ b/.github/workflows/release-snapshot.yml @@ -21,33 +21,41 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x + + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml - name: Hide snapshot tag to outsmart GoReleaser run: git tag -d snapshot || true - name: Run GoReleaser - uses: goreleaser/goreleaser-action@v4 + id: releaser + uses: goreleaser/goreleaser-action@v6 with: - version: latest - args: release --snapshot + version: ~> v2 + args: release --snapshot --skip docker - name: Upload macOS binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_darwin_snapshot path: | dist/*_darwin_*/ - name: Upload Linux binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_linux_snapshot path: | dist/*_linux_*/ - name: Upload Windows binaries - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: cli_windows_snapshot path: | diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 044324edc..531fb39bf 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -22,13 +22,33 @@ jobs: - name: Setup Go uses: actions/setup-go@v5 with: - go-version: 1.21.x + go-version: 1.22.x + + # The default cache key for this action considers only the `go.sum` file. + # We include .goreleaser.yaml here to differentiate from the cache used by the push action + # that runs unit tests. This job produces and uses a different cache. + cache-dependency-path: | + go.sum + .goreleaser.yaml + + # Log into the GitHub Container Registry. The goreleaser action will create + # the docker images and push them to the GitHub Container Registry. + - uses: "docker/login-action@v3" + with: + registry: "ghcr.io" + username: "${{ github.actor }}" + password: "${{ secrets.GITHUB_TOKEN }}" + + # QEMU is required to build cross platform docker images using buildx. + # It allows virtualization of the CPU architecture at the application level. 
+ - name: Set up QEMU dependency + uses: docker/setup-qemu-action@v3 - name: Run GoReleaser id: releaser - uses: goreleaser/goreleaser-action@v4 + uses: goreleaser/goreleaser-action@v6 with: - version: latest + version: ~> v2 args: release env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} @@ -71,7 +91,7 @@ jobs: with: github-token: ${{ secrets.DECO_GITHUB_TOKEN }} script: | - let artifacts = JSON.parse('${{ needs.goreleaser.outputs.artifacts }}') + let artifacts = ${{ needs.goreleaser.outputs.artifacts }} artifacts = artifacts.filter(a => a.type == "Archive") artifacts = new Map( artifacts.map(a => [ @@ -117,14 +137,3 @@ jobs: version: "${{ env.VERSION }}", } }); - - publish-to-winget-pkgs: - needs: goreleaser - runs-on: windows-latest - environment: release - steps: - - uses: vedantmgoyal2009/winget-releaser@93fd8b606a1672ec3e5c6c3bb19426be68d1a8b0 # https://github.com/vedantmgoyal2009/winget-releaser/releases/tag/v2 - with: - identifier: Databricks.DatabricksCLI - installers-regex: 'windows_.*\.zip$' # Only windows releases - token: ${{ secrets.ENG_DEV_ECOSYSTEM_BOT_TOKEN }} diff --git a/.goreleaser.yaml b/.goreleaser.yaml index 0cf87a9ce..3f0bdb2c5 100644 --- a/.goreleaser.yaml +++ b/.goreleaser.yaml @@ -1,6 +1,9 @@ +version: 2 + before: hooks: - - go mod tidy + - go mod download + builds: - env: - CGO_ENABLED=0 @@ -36,6 +39,7 @@ builds: - amd64 - arm64 binary: databricks + archives: - format: zip @@ -45,11 +49,54 @@ archives: # file name then additional logic to clean up older builds would be needed. name_template: 'databricks_cli_{{ if not .IsSnapshot }}{{ .Version }}_{{ end }}{{ .Os }}_{{ .Arch }}' +dockers: + - id: arm64 + goarch: arm64 + # We need to use buildx to build arm64 image on a amd64 machine. + use: buildx + image_templates: + # Docker tags can't have "+" in them, so we replace it with "-" + - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64' + - 'ghcr.io/databricks/cli:latest-arm64' + build_flag_templates: + - "--build-arg=ARCH=arm64" + - "--platform=linux/arm64" + extra_files: + - "./docker/config.tfrc" + - "./docker/setup.sh" + + - id: amd64 + goarch: amd64 + use: buildx + image_templates: + # Docker tags can't have "+" in them, so we replace it with "-" + - 'ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64' + - 'ghcr.io/databricks/cli:latest-amd64' + build_flag_templates: + - "--build-arg=ARCH=amd64" + - "--platform=linux/amd64" + extra_files: + - "./docker/config.tfrc" + - "./docker/setup.sh" + +docker_manifests: + - name_template: ghcr.io/databricks/cli:{{replace .Version "+" "-"}} + image_templates: + - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-amd64 + - ghcr.io/databricks/cli:{{replace .Version "+" "-"}}-arm64 + - name_template: ghcr.io/databricks/cli:latest + image_templates: + - ghcr.io/databricks/cli:latest-amd64 + - ghcr.io/databricks/cli:latest-arm64 + + checksum: name_template: 'databricks_cli_{{ .Version }}_SHA256SUMS' algorithm: sha256 + snapshot: name_template: '{{ incpatch .Version }}-dev+{{ .ShortCommit }}' + changelog: sort: asc filters: diff --git a/.mockery.yaml b/.mockery.yaml new file mode 100644 index 000000000..bc9c051cd --- /dev/null +++ b/.mockery.yaml @@ -0,0 +1,10 @@ +with-expecter: true +filename: "mock_{{.InterfaceName | snakecase}}.go" +mockname: "Mock{{.InterfaceName}}" +outpkg: "mock{{.PackageName}}" +packages: + github.com/databricks/cli/libs/filer: + interfaces: + Filer: + config: + dir: "internal/mocks/libs/filer" diff --git a/CHANGELOG.md b/CHANGELOG.md index 3dc700a8f..eb902e0b4 100644 --- a/CHANGELOG.md +++ 
b/CHANGELOG.md @@ -1,5 +1,526 @@ # Version changelog +## 0.223.2 + +Bundles: + * Override complex variables with target overrides instead of merging ([#1567](https://github.com/databricks/cli/pull/1567)). + * Rewrite local path for libraries in foreach tasks ([#1569](https://github.com/databricks/cli/pull/1569)). + * Change SetVariables mutator to mutate dynamic configuration instead ([#1573](https://github.com/databricks/cli/pull/1573)). + * Return early in bundle destroy if no deployment exists ([#1581](https://github.com/databricks/cli/pull/1581)). + * Let notebook detection code use underlying metadata if available ([#1574](https://github.com/databricks/cli/pull/1574)). + * Remove schema override for variable default value ([#1536](https://github.com/databricks/cli/pull/1536)). + * Print diagnostics in 'bundle deploy' ([#1579](https://github.com/databricks/cli/pull/1579)). + +Internal: + * Update actions/upload-artifact to v4 ([#1559](https://github.com/databricks/cli/pull/1559)). + * Use Go 1.22 to build and test ([#1562](https://github.com/databricks/cli/pull/1562)). + * Move bespoke status call to main workspace files filer ([#1570](https://github.com/databricks/cli/pull/1570)). + * Add new template ([#1578](https://github.com/databricks/cli/pull/1578)). + * Add regression tests for CLI error output ([#1566](https://github.com/databricks/cli/pull/1566)). + +Dependency updates: + * Bump golang.org/x/mod from 0.18.0 to 0.19.0 ([#1576](https://github.com/databricks/cli/pull/1576)). + * Bump golang.org/x/term from 0.21.0 to 0.22.0 ([#1577](https://github.com/databricks/cli/pull/1577)). + +## 0.223.1 + +This bugfix release fixes missing error messages in v0.223.0. + +CLI: + * Fix logic error in [#1532](https://github.com/databricks/cli/pull/1532) ([#1564](https://github.com/databricks/cli/pull/1564)). + + +## 0.223.0 + +Bundles: + +As of this release you can interact with bundles when running the CLI on DBR (e.g. via the Web Terminal). + + * Fix non-default project names not working in dbt-sql template ([#1500](https://github.com/databricks/cli/pull/1500)). + * Improve `bundle validate` output ([#1532](https://github.com/databricks/cli/pull/1532)). + * Fixed resolving variable references inside slice variable ([#1550](https://github.com/databricks/cli/pull/1550)). + * Fixed bundle not loading when empty variable is defined ([#1552](https://github.com/databricks/cli/pull/1552)). + * Use `vfs.Path` for filesystem interaction ([#1554](https://github.com/databricks/cli/pull/1554)). + * Replace `vfs.Path` with extension-aware filer when running on DBR ([#1556](https://github.com/databricks/cli/pull/1556)). + +Internal: + * merge.Override: Fix handling of dyn.NilValue ([#1530](https://github.com/databricks/cli/pull/1530)). + * Compare `.Kind()` instead of direct equality checks on a `dyn.Value` ([#1520](https://github.com/databricks/cli/pull/1520)). + * PythonMutator: register product in user agent extra ([#1533](https://github.com/databricks/cli/pull/1533)). + * Ignore `dyn.NilValue` when traversing value from `dyn.Map` ([#1547](https://github.com/databricks/cli/pull/1547)). + * Add extra tests for the sync block ([#1548](https://github.com/databricks/cli/pull/1548)). + * PythonMutator: add diagnostics ([#1531](https://github.com/databricks/cli/pull/1531)). + * PythonMutator: support omitempty in PyDABs ([#1513](https://github.com/databricks/cli/pull/1513)). + * PythonMutator: allow insert 'resources' and 'resources.jobs' ([#1555](https://github.com/databricks/cli/pull/1555)). 
+ +## 0.222.0 + +CLI: + * Add link to documentation for Homebrew installation to README ([#1505](https://github.com/databricks/cli/pull/1505)). + * Fix `databricks configure` to use `DATABRICKS_CONFIG_FILE` environment variable if exists as config file ([#1325](https://github.com/databricks/cli/pull/1325)). + +Bundles: + +The Terraform upgrade to v1.48.0 includes a fix for library order not being respected. + + * Fix conditional in query in `default-sql` template ([#1479](https://github.com/databricks/cli/pull/1479)). + * Remove user credentials specified in the Git origin URL ([#1494](https://github.com/databricks/cli/pull/1494)). + * Serialize dynamic value for `bundle validate` output ([#1499](https://github.com/databricks/cli/pull/1499)). + * Override variables with lookup value even if values has default value set ([#1504](https://github.com/databricks/cli/pull/1504)). + * Pause quality monitors when "mode: development" is used ([#1481](https://github.com/databricks/cli/pull/1481)). + * Return `fs.ModeDir` for Git folders in the workspace ([#1521](https://github.com/databricks/cli/pull/1521)). + * Upgrade TF provider to 1.48.0 ([#1527](https://github.com/databricks/cli/pull/1527)). + * Added support for complex variables ([#1467](https://github.com/databricks/cli/pull/1467)). + +Internal: + * Add randIntn function ([#1475](https://github.com/databricks/cli/pull/1475)). + * Avoid multiple file tree traversals on bundle deploy ([#1493](https://github.com/databricks/cli/pull/1493)). + * Clean up unused code ([#1502](https://github.com/databricks/cli/pull/1502)). + * Use `dyn.InvalidValue` to indicate absence ([#1507](https://github.com/databricks/cli/pull/1507)). + * Add ApplyPythonMutator ([#1430](https://github.com/databricks/cli/pull/1430)). + * Set bool pointer to disable lock ([#1516](https://github.com/databricks/cli/pull/1516)). + * Allow the any type to be set to nil in `convert.FromTyped` ([#1518](https://github.com/databricks/cli/pull/1518)). + * Properly deal with nil values in `convert.FromTyped` ([#1511](https://github.com/databricks/cli/pull/1511)). + * Return `dyn.InvalidValue` instead of `dyn.NilValue` when errors happen ([#1514](https://github.com/databricks/cli/pull/1514)). + * PythonMutator: replace stdin/stdout with files ([#1512](https://github.com/databricks/cli/pull/1512)). + * Add context type and value to path rewriting ([#1525](https://github.com/databricks/cli/pull/1525)). + +API Changes: + * Added schedule CRUD commands to `databricks lakeview`. + * Added subscription CRUD commands to `databricks lakeview`. + * Added `databricks apps start` command. + +OpenAPI commit 7437dabb9dadee402c1fc060df4c1ce8cc5369f0 (2024-06-24) + +Dependency updates: + * Bump golang.org/x/text from 0.15.0 to 0.16.0 ([#1482](https://github.com/databricks/cli/pull/1482)). + * Bump golang.org/x/term from 0.20.0 to 0.21.0 ([#1483](https://github.com/databricks/cli/pull/1483)). + * Bump golang.org/x/mod from 0.17.0 to 0.18.0 ([#1484](https://github.com/databricks/cli/pull/1484)). + * Bump golang.org/x/oauth2 from 0.20.0 to 0.21.0 ([#1485](https://github.com/databricks/cli/pull/1485)). + * Bump github.com/briandowns/spinner from 1.23.0 to 1.23.1 ([#1495](https://github.com/databricks/cli/pull/1495)). + * Bump github.com/spf13/cobra from 1.8.0 to 1.8.1 ([#1496](https://github.com/databricks/cli/pull/1496)). + * Bump github.com/databricks/databricks-sdk-go from 0.42.0 to 0.43.0 ([#1522](https://github.com/databricks/cli/pull/1522)). 
+
+## 0.221.1
+
+Bundles:
+
+This release fixes an issue introduced in v0.221.0 where managing jobs with a single-node cluster would fail.
+
+ * Fix SQL schema selection in default-sql template ([#1471](https://github.com/databricks/cli/pull/1471)).
+ * Copy-editing for SQL templates ([#1474](https://github.com/databricks/cli/pull/1474)).
+ * Upgrade TF provider to 1.47.0 ([#1476](https://github.com/databricks/cli/pull/1476)).
+
+Internal:
+ * Use latest version of goreleaser action ([#1477](https://github.com/databricks/cli/pull/1477)).
+
+
+
+## 0.221.0
+
+CLI:
+ * Update OpenAPI spec ([#1466](https://github.com/databricks/cli/pull/1466)).
+
+Bundles:
+ * Upgrade TF provider to 1.46.0 ([#1460](https://github.com/databricks/cli/pull/1460)).
+ * Add support for Lakehouse monitoring ([#1307](https://github.com/databricks/cli/pull/1307)).
+ * Make dbt-sql and default-sql templates public ([#1463](https://github.com/databricks/cli/pull/1463)).
+
+Internal:
+ * Abstract over filesystem interaction with libs/vfs ([#1452](https://github.com/databricks/cli/pull/1452)).
+ * Add `filer.Filer` to read notebooks from WSFS without omitting their extension ([#1457](https://github.com/databricks/cli/pull/1457)).
+ * Fix listing notebooks in a subdirectory ([#1468](https://github.com/databricks/cli/pull/1468)).
+
+API Changes:
+ * Changed `databricks account storage-credentials list` command to return .
+ * Added `databricks consumer-listings batch-get` command.
+ * Added `databricks consumer-providers batch-get` command.
+ * Removed `databricks apps create-deployment` command.
+ * Added `databricks apps deploy` command.
+
+OpenAPI commit 37b925eba37dfb3d7e05b6ba2d458454ce62d3a0 (2024-06-03)
+
+Dependency updates:
+ * Bump github.com/hashicorp/go-version from 1.6.0 to 1.7.0 ([#1454](https://github.com/databricks/cli/pull/1454)).
+ * Bump github.com/hashicorp/hc-install from 0.6.4 to 0.7.0 ([#1453](https://github.com/databricks/cli/pull/1453)).
+
+## 0.220.0
+
+CLI:
+ * Add line about Docker installation to README.md ([#1363](https://github.com/databricks/cli/pull/1363)).
+ * Improve token refresh flow ([#1434](https://github.com/databricks/cli/pull/1434)).
+
+Bundles:
+ * Upgrade Terraform provider to v1.42.0 ([#1418](https://github.com/databricks/cli/pull/1418)).
+ * Upgrade Terraform provider to v1.43.0 ([#1429](https://github.com/databricks/cli/pull/1429)).
+ * Don't merge-in remote resources during deployments ([#1432](https://github.com/databricks/cli/pull/1432)).
+ * Remove dependency on `ConfigFilePath` from path translation mutator ([#1437](https://github.com/databricks/cli/pull/1437)).
+ * Add `merge.Override` transform ([#1428](https://github.com/databricks/cli/pull/1428)).
+ * Fixed panic when loading incorrectly defined jobs ([#1402](https://github.com/databricks/cli/pull/1402)).
+ * Add more tests for `merge.Override` ([#1439](https://github.com/databricks/cli/pull/1439)).
+ * Fixed seg fault when specifying environment key for tasks ([#1443](https://github.com/databricks/cli/pull/1443)).
+ * Fix conversion of zero valued scalar pointers to a dynamic value ([#1433](https://github.com/databricks/cli/pull/1433)).
+
+Internal:
+ * Don't hide commands of services that are already hidden ([#1438](https://github.com/databricks/cli/pull/1438)).
+
+API Changes:
+ * Renamed `lakehouse-monitors` command group to `quality-monitors`.
+ * Added `apps` command group.
+ * Renamed `csp-enablement` command group to `compliance-security-profile`.
+ * Renamed `esm-enablement` command group to `enhanced-security-monitoring`. + * Added `databricks vector-search-indexes scan-index` command. + +OpenAPI commit 7eb5ad9a2ed3e3f1055968a2d1014ac92c06fe92 (2024-05-21) + +Dependency updates: + * Bump golang.org/x/text from 0.14.0 to 0.15.0 ([#1419](https://github.com/databricks/cli/pull/1419)). + * Bump golang.org/x/oauth2 from 0.19.0 to 0.20.0 ([#1421](https://github.com/databricks/cli/pull/1421)). + * Bump golang.org/x/term from 0.19.0 to 0.20.0 ([#1422](https://github.com/databricks/cli/pull/1422)). + * Bump github.com/databricks/databricks-sdk-go from 0.39.0 to 0.40.1 ([#1431](https://github.com/databricks/cli/pull/1431)). + * Bump github.com/fatih/color from 1.16.0 to 1.17.0 ([#1441](https://github.com/databricks/cli/pull/1441)). + * Bump github.com/hashicorp/terraform-json from 0.21.0 to 0.22.1 ([#1440](https://github.com/databricks/cli/pull/1440)). + * Bump github.com/hashicorp/terraform-exec from 0.20.0 to 0.21.0 ([#1442](https://github.com/databricks/cli/pull/1442)). + * Update Go SDK to v0.41.0 ([#1445](https://github.com/databricks/cli/pull/1445)). + +## 0.219.0 + +Bundles: + * Don't fail while parsing outdated terraform state ([#1404](https://github.com/databricks/cli/pull/1404)). + * Annotate DLT pipelines when deployed using DABs ([#1410](https://github.com/databricks/cli/pull/1410)). + + +API Changes: + * Changed `databricks libraries cluster-status` command. New request type is compute.ClusterStatus. + * Changed `databricks libraries cluster-status` command to return . + * Added `databricks serving-endpoints get-open-api` command. + +OpenAPI commit 21f9f1482f9d0d15228da59f2cd9f0863d2a6d55 (2024-04-23) +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.38.0 to 0.39.0 ([#1405](https://github.com/databricks/cli/pull/1405)). + +## 0.218.1 + +This is a bugfix release. + +CLI: + * Pass `DATABRICKS_CONFIG_FILE` for `auth profiles` ([#1394](https://github.com/databricks/cli/pull/1394)). + +Bundles: + * Show a better error message for using wheel tasks with older DBR versions ([#1373](https://github.com/databricks/cli/pull/1373)). + * Allow variable references in non-string fields in the JSON schema ([#1398](https://github.com/databricks/cli/pull/1398)). + * Fix variable overrides in targets for non-string variables ([#1397](https://github.com/databricks/cli/pull/1397)). + * Fix bundle schema for variables ([#1396](https://github.com/databricks/cli/pull/1396)). + * Fix bundle documentation URL ([#1399](https://github.com/databricks/cli/pull/1399)). + +Internal: + * Removed autogenerated docs for the CLI commands ([#1392](https://github.com/databricks/cli/pull/1392)). + * Remove `JSON.parse` call from homebrew-tap action ([#1393](https://github.com/databricks/cli/pull/1393)). + * Ensure that Python dependencies are installed during upgrade ([#1390](https://github.com/databricks/cli/pull/1390)). + + + +## 0.218.0 + +This release marks the general availability of Databricks Asset Bundles. + +CLI: + * Publish Docker images ([#1353](https://github.com/databricks/cli/pull/1353)). + * Add support for multi-arch Docker images ([#1362](https://github.com/databricks/cli/pull/1362)). + * Do not prefill https:// in prompt for Databricks Host ([#1364](https://github.com/databricks/cli/pull/1364)). + * Add better documentation for the `auth login` command ([#1366](https://github.com/databricks/cli/pull/1366)). 
+ * Add URLs for authentication documentation to the auth command help ([#1365](https://github.com/databricks/cli/pull/1365)). + +Bundles: + * Fix compute override for foreach tasks ([#1357](https://github.com/databricks/cli/pull/1357)). + * Transform artifact files source patterns in build not upload stage ([#1359](https://github.com/databricks/cli/pull/1359)). + * Convert between integer and float in normalization ([#1371](https://github.com/databricks/cli/pull/1371)). + * Disable locking for development mode ([#1302](https://github.com/databricks/cli/pull/1302)). + * Resolve variable references inside variable lookup fields ([#1368](https://github.com/databricks/cli/pull/1368)). + * Added validate mutator to surface additional bundle warnings ([#1352](https://github.com/databricks/cli/pull/1352)). + * Upgrade terraform-provider-databricks to 1.40.0 ([#1376](https://github.com/databricks/cli/pull/1376)). + * Print host in `bundle validate` when passed via profile or environment variables ([#1378](https://github.com/databricks/cli/pull/1378)). + * Cleanup remote file path on bundle destroy ([#1374](https://github.com/databricks/cli/pull/1374)). + * Add docs URL for `run_as` in error message ([#1381](https://github.com/databricks/cli/pull/1381)). + * Enable job queueing by default ([#1385](https://github.com/databricks/cli/pull/1385)). + * Added support for job environments ([#1379](https://github.com/databricks/cli/pull/1379)). + * Processing and completion of positional args to bundle run ([#1120](https://github.com/databricks/cli/pull/1120)). + * Add legacy option for `run_as` ([#1384](https://github.com/databricks/cli/pull/1384)). + +API Changes: + * Changed `databricks lakehouse-monitors cancel-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors create` command with new required argument order. + * Changed `databricks lakehouse-monitors delete` command with new required argument order. + * Changed `databricks lakehouse-monitors get` command with new required argument order. + * Changed `databricks lakehouse-monitors get-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors list-refreshes` command with new required argument order. + * Changed `databricks lakehouse-monitors run-refresh` command with new required argument order. + * Changed `databricks lakehouse-monitors update` command with new required argument order. + * Changed `databricks account workspace-assignment update` command to return response. + +OpenAPI commit 94684175b8bd65f8701f89729351f8069e8309c9 (2024-04-11) + +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.37.0 to 0.38.0 ([#1361](https://github.com/databricks/cli/pull/1361)). + * Bump golang.org/x/net from 0.22.0 to 0.23.0 ([#1380](https://github.com/databricks/cli/pull/1380)). + +## 0.217.1 + +CLI: + * Don't attempt auth in `auth profiles --skip-validate` ([#1282](https://github.com/databricks/cli/pull/1282)). + * Fixed typo in error template for auth describe ([#1341](https://github.com/databricks/cli/pull/1341)). + +Bundles: + * Correctly transform libraries in for_each_task block ([#1340](https://github.com/databricks/cli/pull/1340)). + * Do not emit warning on YAML anchor blocks ([#1354](https://github.com/databricks/cli/pull/1354)). + * Fixed pre-init script order ([#1348](https://github.com/databricks/cli/pull/1348)). + * Execute preinit after entry point to make sure scripts are loaded ([#1351](https://github.com/databricks/cli/pull/1351)). 
+ + +Dependency updates: + * Bump internal terraform provider version to `1.39` ([#1339](https://github.com/databricks/cli/pull/1339)). + * Bump golang.org/x/term from 0.18.0 to 0.19.0 ([#1343](https://github.com/databricks/cli/pull/1343)). + * Bump github.com/hashicorp/hc-install from 0.6.3 to 0.6.4 ([#1344](https://github.com/databricks/cli/pull/1344)). + * Bump golang.org/x/mod from 0.16.0 to 0.17.0 ([#1345](https://github.com/databricks/cli/pull/1345)). + * Bump golang.org/x/oauth2 from 0.18.0 to 0.19.0 ([#1347](https://github.com/databricks/cli/pull/1347)). + * Bump golang.org/x/sync from 0.6.0 to 0.7.0 ([#1346](https://github.com/databricks/cli/pull/1346)). + +## 0.217.0 + +Breaking Change: + * Add allow list for resources when bundle `run_as` is set ([#1233](https://github.com/databricks/cli/pull/1233)). + * Make bundle validation print text output by default ([#1335](https://github.com/databricks/cli/pull/1335)). + +CLI: + * Added `auth describe` command ([#1244](https://github.com/databricks/cli/pull/1244)). + * Fixed message for successful auth describe run ([#1336](https://github.com/databricks/cli/pull/1336)). + +Bundles: + * Use UserName field to identify if service principal is used ([#1310](https://github.com/databricks/cli/pull/1310)). + * Allow unknown properties in the config file for template initialization ([#1315](https://github.com/databricks/cli/pull/1315)). + * Remove support for DATABRICKS_BUNDLE_INCLUDES ([#1317](https://github.com/databricks/cli/pull/1317)). + * Make `bundle.deployment` optional in the bundle schema ([#1321](https://github.com/databricks/cli/pull/1321)). + * Fix the generated DABs JSON schema ([#1322](https://github.com/databricks/cli/pull/1322)). + * Make bundle loaders return diagnostics ([#1319](https://github.com/databricks/cli/pull/1319)). + * Add `bundle debug terraform` command ([#1294](https://github.com/databricks/cli/pull/1294)). + * Allow specifying CLI version constraints required to run the bundle ([#1320](https://github.com/databricks/cli/pull/1320)). + +Internal: + * Retain location information of variable reference ([#1333](https://github.com/databricks/cli/pull/1333)). + * Define `dyn.Mapping` to represent maps ([#1301](https://github.com/databricks/cli/pull/1301)). + * Return `diag.Diagnostics` from mutators ([#1305](https://github.com/databricks/cli/pull/1305)). + * Fix flaky test in `libs/process` ([#1314](https://github.com/databricks/cli/pull/1314)). + * Move path field to bundle type ([#1316](https://github.com/databricks/cli/pull/1316)). + * Load bundle configuration from mutator ([#1318](https://github.com/databricks/cli/pull/1318)). + * Return diagnostics from `config.Load` ([#1324](https://github.com/databricks/cli/pull/1324)). + * Return warning for nil primitive types during normalization ([#1329](https://github.com/databricks/cli/pull/1329)). + * Include `dyn.Path` in normalization warnings and errors ([#1332](https://github.com/databricks/cli/pull/1332)). + * Make normalization return warnings instead of errors ([#1334](https://github.com/databricks/cli/pull/1334)). + +API Changes: + * Added `databricks lakeview migrate` command. + * Added `databricks lakeview unpublish` command. + * Changed `databricks ip-access-lists get` command . New request type is . + +OpenAPI commit e316cc3d78d087522a74650e26586088da9ac8cb (2024-04-03) +Dependency updates: + * Bump github.com/databricks/databricks-sdk-go from 0.36.0 to 0.37.0 ([#1326](https://github.com/databricks/cli/pull/1326)). 
+ +## 0.216.0 + +CLI: + * Propagate correct `User-Agent` for CLI during OAuth flow ([#1264](https://github.com/databricks/cli/pull/1264)). + * Add usage string when command fails with incorrect arguments ([#1276](https://github.com/databricks/cli/pull/1276)). + +Bundles: + * Include `dyn.Path` as argument to the visit callback function ([#1260](https://github.com/databricks/cli/pull/1260)). + * Inline logic to set a value in `dyn.SetByPath` ([#1261](https://github.com/databricks/cli/pull/1261)). + * Add assertions for the `dyn.Path` argument to the visit callback ([#1265](https://github.com/databricks/cli/pull/1265)). + * Add `dyn.MapByPattern` to map a function to values with matching paths ([#1266](https://github.com/databricks/cli/pull/1266)). + * Filter current user from resource permissions ([#1262](https://github.com/databricks/cli/pull/1262)). + * Retain location annotation when expanding globs for pipeline libraries ([#1274](https://github.com/databricks/cli/pull/1274)). + * Added deployment state for bundles ([#1267](https://github.com/databricks/cli/pull/1267)). + * Do CheckRunningResource only after terraform.Write ([#1292](https://github.com/databricks/cli/pull/1292)). + * Rewrite relative paths using `dyn.Location` of the underlying value ([#1273](https://github.com/databricks/cli/pull/1273)). + * Push deployment state right after files upload ([#1293](https://github.com/databricks/cli/pull/1293)). + * Make `Append` function to `dyn.Path` return independent slice ([#1295](https://github.com/databricks/cli/pull/1295)). + * Move bundle tests into bundle/tests ([#1299](https://github.com/databricks/cli/pull/1299)). + * Upgrade Terraform provider to 1.38.0 ([#1308](https://github.com/databricks/cli/pull/1308)). + +Internal: + * Add integration test for mlops-stacks initialization ([#1155](https://github.com/databricks/cli/pull/1155)). + * Update actions/setup-python to v5 ([#1290](https://github.com/databricks/cli/pull/1290)). + * Update codecov/codecov-action to v4 ([#1291](https://github.com/databricks/cli/pull/1291)). + +API Changes: + * Changed `databricks catalogs list` command. + * Changed `databricks online-tables create` command. + * Changed `databricks lakeview publish` command. + * Added `databricks lakeview create` command. + * Added `databricks lakeview get` command. + * Added `databricks lakeview get-published` command. + * Added `databricks lakeview trash` command. + * Added `databricks lakeview update` command. + * Moved settings related commands to `databricks settings` and `databricks account settings`. + +OpenAPI commit 93763b0d7ae908520c229c786fff28b8fd623261 (2024-03-20) + +Dependency updates: + * Bump golang.org/x/oauth2 from 0.17.0 to 0.18.0 ([#1270](https://github.com/databricks/cli/pull/1270)). + * Bump golang.org/x/mod from 0.15.0 to 0.16.0 ([#1271](https://github.com/databricks/cli/pull/1271)). + * Update Go SDK to v0.35.0 ([#1300](https://github.com/databricks/cli/pull/1300)). + * Update Go SDK to v0.36.0 ([#1304](https://github.com/databricks/cli/pull/1304)). + +## 0.215.0 + +CLI: +* The SDK update fixes `fs cp` calls timing out when copying large files. + +Bundles: +* Fix summary command when internal Terraform config doesn't exist ([#1242](https://github.com/databricks/cli/pull/1242)). +* Configure cobra.NoArgs for bundle commands where applicable ([#1250](https://github.com/databricks/cli/pull/1250)). +* Fixed building Python artifacts on Windows with WSL ([#1249](https://github.com/databricks/cli/pull/1249)). 
+* Add `--validate-only` flag to run validate-only pipeline update ([#1251](https://github.com/databricks/cli/pull/1251)).
+* Only transform wheel libraries when using trampoline ([#1248](https://github.com/databricks/cli/pull/1248)).
+* Return `application_id` for service principal lookups ([#1245](https://github.com/databricks/cli/pull/1245)).
+* Support relative paths in artifact files source section and always upload all artifact files ([#1247](https://github.com/databricks/cli/pull/1247)).
+* Fix DBConnect support in VS Code ([#1253](https://github.com/databricks/cli/pull/1253)).
+
+Internal:
+* Added test to verify scripts.Execute mutator works correctly ([#1237](https://github.com/databricks/cli/pull/1237)).
+
+API Changes:
+* Added `databricks permission-migration` command group.
+* Updated nesting of the `databricks settings` and `databricks account settings` commands.
+* Changed `databricks vector-search-endpoints delete-endpoint` command with new required argument order.
+* Changed `databricks vector-search-indexes create-index` command with new required argument order.
+* Changed `databricks vector-search-indexes delete-data-vector-index` command with new required argument order.
+* Changed `databricks vector-search-indexes upsert-data-vector-index` command with new required argument order.
+
+OpenAPI commit d855b30f25a06fe84f25214efa20e7f1fffcdf9e (2024-03-04)
+
+Dependency updates:
+* Bump github.com/stretchr/testify from 1.8.4 to 1.9.0 ([#1252](https://github.com/databricks/cli/pull/1252)).
+* Update Go SDK to v0.34.0 ([#1256](https://github.com/databricks/cli/pull/1256)).
+## 0.214.1
+
+CLI:
+ * Improved error message when no .databrickscfg ([#1223](https://github.com/databricks/cli/pull/1223)).
+ * Use Go SDK Iterators when listing resources with the CLI ([#1202](https://github.com/databricks/cli/pull/1202)).
+
+Bundles:
+ * Only set ComputeID value when `--compute-id` flag provided ([#1229](https://github.com/databricks/cli/pull/1229)).
+ * Add correct tag value for models in dev mode ([#1230](https://github.com/databricks/cli/pull/1230)).
+ * Upgrade Terraform provider to 1.37.0 ([#1235](https://github.com/databricks/cli/pull/1235)).
+
+Internal:
+ * Fix CLI nightlies on our UC workspaces ([#1225](https://github.com/databricks/cli/pull/1225)).
+ * Handle alias types for map keys in toTyped conversion ([#1232](https://github.com/databricks/cli/pull/1232)).
+
+
+
+## 0.214.0
+
+CLI:
+ * Add support for UC Volumes to the `databricks fs` commands ([#1209](https://github.com/databricks/cli/pull/1209)).
+
+Bundles:
+ * Use dynamic configuration model in bundles ([#1098](https://github.com/databricks/cli/pull/1098)).
+ * Allow use of variables references in primitive non-string fields ([#1219](https://github.com/databricks/cli/pull/1219)).
+ * Add an experimental default-sql template ([#1051](https://github.com/databricks/cli/pull/1051)).
+ * Add an experimental dbt-sql template ([#1059](https://github.com/databricks/cli/pull/1059)).
+
+Internal:
+ * Add fork-user to winget release workflow ([#1214](https://github.com/databricks/cli/pull/1214)).
+ * Use `any` as type for data sources and resources in `tf/schema` ([#1216](https://github.com/databricks/cli/pull/1216)).
+ * Avoid infinite recursion when normalizing a recursive type ([#1213](https://github.com/databricks/cli/pull/1213)).
+ * Fix issue where interpolating a new ref would rewrite unrelated fields ([#1217](https://github.com/databricks/cli/pull/1217)).
+ * Use `dyn.Value` as input to generating Terraform JSON ([#1218](https://github.com/databricks/cli/pull/1218)). + +API Changes: + * Changed `databricks lakehouse-monitors update` command with new required argument order. + * Added `databricks online-tables` command group. + +OpenAPI commit cdd76a98a4fca7008572b3a94427566dd286c63b (2024-02-19) +Dependency updates: + * Bump Terraform provider to v1.36.2 ([#1215](https://github.com/databricks/cli/pull/1215)). + * Bump github.com/databricks/databricks-sdk-go from 0.32.0 to 0.33.0 ([#1222](https://github.com/databricks/cli/pull/1222)). + +## 0.213.0 + +CLI: + * Ignore environment variables for `auth profiles` ([#1189](https://github.com/databricks/cli/pull/1189)). + * Update LICENSE file to match Databricks license language ([#1013](https://github.com/databricks/cli/pull/1013)). + +Bundles: + * Added `bundle deployment bind` and `unbind` command ([#1131](https://github.com/databricks/cli/pull/1131)). + * Use allowlist for Git-related fields to include in metadata ([#1187](https://github.com/databricks/cli/pull/1187)). + * Added `--restart` flag for `bundle run` command ([#1191](https://github.com/databricks/cli/pull/1191)). + * Generate correct YAML if `custom_tags` or `spark_conf` is used for pipeline or job cluster configuration ([#1210](https://github.com/databricks/cli/pull/1210)). + +Internal: + * Move folders package into libs ([#1184](https://github.com/databricks/cli/pull/1184)). + * Log time it takes for profile to load ([#1186](https://github.com/databricks/cli/pull/1186)). + * Use mockery to generate mocks compatible with testify/mock ([#1190](https://github.com/databricks/cli/pull/1190)). + * Retain partially valid structs in `convert.Normalize` ([#1203](https://github.com/databricks/cli/pull/1203)). + * Skip `for_each_task` when generating the bundle schema ([#1204](https://github.com/databricks/cli/pull/1204)). + * Regenerate the CLI using the same OpenAPI spec as the SDK ([#1205](https://github.com/databricks/cli/pull/1205)). + * Avoid race-conditions while executing sub-commands ([#1201](https://github.com/databricks/cli/pull/1201)). + +API Changes: + * Added `databricks tables exists` command. + * Added `databricks lakehouse-monitors` command group. + * Removed `databricks files get-status` command. + * Added `databricks files create-directory` command. + * Added `databricks files delete-directory` command. + * Added `databricks files get-directory-metadata` command. + * Added `databricks files get-metadata` command. + * Added `databricks files list-directory-contents` command. + * Removed `databricks pipelines reset` command. + * Changed `databricks account settings delete-personal-compute-setting` command with new required argument order. + * Removed `databricks account settings read-personal-compute-setting` command. + * Changed `databricks account settings update-personal-compute-setting` command with new required argument order. + * Added `databricks account settings get-personal-compute-setting` command. + * Removed `databricks settings delete-default-workspace-namespace` command. + * Removed `databricks settings read-default-workspace-namespace` command. + * Removed `databricks settings update-default-workspace-namespace` command. + * Added `databricks settings delete-default-namespace-setting` command. + * Added `databricks settings delete-restrict-workspace-admins-setting` command. + * Added `databricks settings get-default-namespace-setting` command. 
+ * Added `databricks settings get-restrict-workspace-admins-setting` command. + * Added `databricks settings update-default-namespace-setting` command. + * Added `databricks settings update-restrict-workspace-admins-setting` command. + * Changed `databricks token-management create-obo-token` command with new required argument order. + * Changed `databricks token-management get` command to return . + * Changed `databricks dashboards create` command . New request type is . + * Added `databricks dashboards update` command. + +OpenAPI commit c40670f5a2055c92cf0a6aac92a5bccebfb80866 (2024-02-14) +Dependency updates: + * Bump github.com/hashicorp/hc-install from 0.6.2 to 0.6.3 ([#1200](https://github.com/databricks/cli/pull/1200)). + * Bump golang.org/x/term from 0.16.0 to 0.17.0 ([#1197](https://github.com/databricks/cli/pull/1197)). + * Bump golang.org/x/oauth2 from 0.16.0 to 0.17.0 ([#1198](https://github.com/databricks/cli/pull/1198)). + * Bump github.com/databricks/databricks-sdk-go from 0.30.1 to 0.32.0 ([#1199](https://github.com/databricks/cli/pull/1199)). + +## 0.212.4 + +Bundles: + * Allow specifying executable in artifact section and skip bash from WSL ([#1169](https://github.com/databricks/cli/pull/1169)). + * Added warning when trying to deploy bundle with `--fail-on-active-runs` and running resources ([#1163](https://github.com/databricks/cli/pull/1163)). + * Group bundle run flags by job and pipeline types ([#1174](https://github.com/databricks/cli/pull/1174)). + * Make sure grouped flags are added to the command flag set ([#1180](https://github.com/databricks/cli/pull/1180)). + * Add short_name helper function to bundle init templates ([#1167](https://github.com/databricks/cli/pull/1167)). + +Internal: + * Fix dynamic representation of zero values in maps and slices ([#1154](https://github.com/databricks/cli/pull/1154)). + * Refactor library to artifact matching to not use pointers ([#1172](https://github.com/databricks/cli/pull/1172)). + * Harden `dyn.Value` equality check ([#1173](https://github.com/databricks/cli/pull/1173)). + * Ensure every variable reference is passed to lookup function ([#1176](https://github.com/databricks/cli/pull/1176)). + * Empty struct should yield empty map in `convert.FromTyped` ([#1177](https://github.com/databricks/cli/pull/1177)). + * Zero destination struct in `convert.ToTyped` ([#1178](https://github.com/databricks/cli/pull/1178)). + * Fix integration test with invalid configuration ([#1182](https://github.com/databricks/cli/pull/1182)). + * Use `acc.WorkspaceTest` helper from bundle integration tests ([#1181](https://github.com/databricks/cli/pull/1181)). + ## 0.212.3 CLI: diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 000000000..b2a61a767 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,25 @@ +FROM alpine:3.19 as builder + +RUN ["apk", "add", "jq"] +RUN ["apk", "add", "bash"] + +WORKDIR /build + +COPY ./docker/setup.sh /build/docker/setup.sh +COPY ./databricks /app/databricks +COPY ./docker/config.tfrc /app/config/config.tfrc + +ARG ARCH +RUN /build/docker/setup.sh + +# Start from a fresh base image, to remove any build artifacts and scripts. 
+FROM alpine:3.19 + +ENV DATABRICKS_TF_EXEC_PATH "/app/bin/terraform" +ENV DATABRICKS_TF_CLI_CONFIG_FILE "/app/config/config.tfrc" +ENV PATH="/app:${PATH}" + +COPY --from=builder /app /app + +ENTRYPOINT ["/app/databricks"] +CMD ["-h"] diff --git a/LICENSE b/LICENSE index 21db58bb9..9878fb474 100644 --- a/LICENSE +++ b/LICENSE @@ -1,25 +1,69 @@ -DB license + Databricks License + Copyright (2022) Databricks, Inc. -Copyright (2022) Databricks, Inc. + Definitions. -Definitions. + Agreement: The agreement between Databricks, Inc., and you governing + the use of the Databricks Services, as that term is defined in + the Master Cloud Services Agreement (MCSA) located at + www.databricks.com/legal/mcsa. -Agreement: The agreement between Databricks, Inc., and you governing the use of the Databricks Services, which shall be, with respect to Databricks, the Databricks Terms of Service located at www.databricks.com/termsofservice, and with respect to Databricks Community Edition, the Community Edition Terms of Service located at www.databricks.com/ce-termsofuse, in each case unless you have entered into a separate written agreement with Databricks governing the use of the applicable Databricks Services. + Licensed Materials: The source code, object code, data, and/or other + works to which this license applies. -Software: The source code and object code to which this license applies. + Scope of Use. You may not use the Licensed Materials except in + connection with your use of the Databricks Services pursuant to + the Agreement. Your use of the Licensed Materials must comply at all + times with any restrictions applicable to the Databricks Services, + generally, and must be used in accordance with any applicable + documentation. You may view, use, copy, modify, publish, and/or + distribute the Licensed Materials solely for the purposes of using + the Licensed Materials within or connecting to the Databricks Services. + If you do not agree to these terms, you may not view, use, copy, + modify, publish, and/or distribute the Licensed Materials. -Scope of Use. You may not use this Software except in connection with your use of the Databricks Services pursuant to the Agreement. Your use of the Software must comply at all times with any restrictions applicable to the Databricks Services, generally, and must be used in accordance with any applicable documentation. You may view, use, copy, modify, publish, and/or distribute the Software solely for the purposes of using the code within or connecting to the Databricks Services. If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Software. + Redistribution. You may redistribute and sublicense the Licensed + Materials so long as all use is in compliance with these terms. + In addition: -Redistribution. You may redistribute and sublicense the Software so long as all use is in compliance with these terms. 
In addition: + - You must give any other recipients a copy of this License; + - You must cause any modified files to carry prominent notices + stating that you changed the files; + - You must retain, in any derivative works that you distribute, + all copyright, patent, trademark, and attribution notices, + excluding those notices that do not pertain to any part of + the derivative works; and + - If a "NOTICE" text file is provided as part of its + distribution, then any derivative works that you distribute + must include a readable copy of the attribution notices + contained within such NOTICE file, excluding those notices + that do not pertain to any part of the derivative works. -You must give any other recipients a copy of this License; -You must cause any modified files to carry prominent notices stating that you changed the files; -You must retain, in the source code form of any derivative works that you distribute, all copyright, patent, trademark, and attribution notices from the source code form, excluding those notices that do not pertain to any part of the derivative works; and -If the source code form includes a "NOTICE" text file as part of its distribution, then any derivative works that you distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the derivative works. -You may add your own copyright statement to your modifications and may provide additional license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the Software otherwise complies with the conditions stated in this License. + You may add your own copyright statement to your modifications and may + provide additional license terms and conditions for use, reproduction, + or distribution of your modifications, or for any such derivative works + as a whole, provided your use, reproduction, and distribution of + the Licensed Materials otherwise complies with the conditions stated + in this License. -Termination. This license terminates automatically upon your breach of these terms or upon the termination of your Agreement. Additionally, Databricks may terminate this license at any time on notice. Upon termination, you must permanently delete the Software and all copies thereof. + Termination. This license terminates automatically upon your breach of + these terms or upon the termination of your Agreement. Additionally, + Databricks may terminate this license at any time on notice. Upon + termination, you must permanently delete the Licensed Materials and + all copies thereof. -DISCLAIMER; LIMITATION OF LIABILITY. + DISCLAIMER; LIMITATION OF LIABILITY. -THE SOFTWARE IS PROVIDED “AS-IS” AND WITH ALL FAULTS. DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY DISCLAIMS ALL WARRANTIES RELATING TO THE SOURCE CODE, EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE SOURCE CODE SHALL BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + THE LICENSED MATERIALS ARE PROVIDED “AS-IS” AND WITH ALL FAULTS. + DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY + DISCLAIMS ALL WARRANTIES RELATING TO THE LICENSED MATERIALS, EXPRESS + AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, + CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR + FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND + ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF + YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE LICENSED MATERIALS SHALL + BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL + THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR + OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, + ARISING FROM, OUT OF OR IN CONNECTION WITH THE LICENSED MATERIALS OR + THE USE OR OTHER DEALINGS IN THE LICENSED MATERIALS. diff --git a/NOTICE b/NOTICE index 7c7eb7db4..d8306510e 100644 --- a/NOTICE +++ b/NOTICE @@ -16,16 +16,12 @@ go-ini/ini - https://github.com/go-ini/ini Copyright ini authors License - https://github.com/go-ini/ini/blob/main/LICENSE -uber-go/mock - https://go.uber.org/mock -Copyright Google Inc. -License - https://github.com/uber-go/mock/blob/main/LICENSE - —-- This software contains code from the following open source projects, licensed under the MPL 2.0 license: hashicopr/go-version - https://github.com/hashicorp/go-version -Copyright 2014 HashiCorp, Inc. +Copyright 2014 HashiCorp, Inc. License - https://github.com/hashicorp/go-version/blob/main/LICENSE hashicorp/hc-install - https://github.com/hashicorp/hc-install @@ -40,6 +36,10 @@ hashicorp/terraform-json - https://github.com/hashicorp/terraform-json Copyright 2019 HashiCorp, Inc. License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE +hashicorp/terraform - https://github.com/hashicorp/terraform +Copyright 2014 HashiCorp, Inc. +License - https://github.com/hashicorp/terraform/blob/v1.5.5/LICENSE + --- This software contains code from the following open source projects, licensed under the BSD (2-clause) license: @@ -61,11 +61,6 @@ google/uuid - https://github.com/google/uuid Copyright (c) 2009,2014 Google Inc. All rights reserved. License - https://github.com/google/uuid/blob/master/LICENSE -imdario/mergo - https://github.com/imdario/mergo -Copyright (c) 2013 Dario Castañé. All rights reserved. -Copyright (c) 2012 The Go Authors. All rights reserved. -License - https://github.com/imdario/mergo/blob/master/LICENSE - manifoldco/promptui - https://github.com/manifoldco/promptui Copyright (c) 2017, Arigato Machine Inc. All rights reserved. 
License - https://github.com/manifoldco/promptui/blob/master/LICENSE.md @@ -81,7 +76,11 @@ License - https://github.com/fatih/color/blob/main/LICENSE.md ghodss/yaml - https://github.com/ghodss/yaml Copyright (c) 2014 Sam Ghods License - https://github.com/ghodss/yaml/blob/master/LICENSE - + +Masterminds/semver - https://github.com/Masterminds/semver +Copyright (C) 2014-2019, Matt Butcher and Matt Farina +License - https://github.com/Masterminds/semver/blob/master/LICENSE.txt + mattn/go-isatty - https://github.com/mattn/go-isatty Copyright (c) Yasuhiro MATSUMOTO https://github.com/mattn/go-isatty/blob/master/LICENSE diff --git a/README.md b/README.md index 83051ccf7..51780d0f9 100644 --- a/README.md +++ b/README.md @@ -4,16 +4,31 @@ This project is in Public Preview. -Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md). - Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html. ## Installation This CLI is packaged as a dependency-free binary executable and may be located in any directory. See https://github.com/databricks/cli/releases for releases and -[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for -installation instructions. +the [Databricks documentation](https://docs.databricks.com/en/dev-tools/cli/install.html) for detailed information about installing the CLI. + +------ +### Homebrew + +We maintain a [Homebrew tap](https://github.com/databricks/homebrew-tap) for installing the Databricks CLI. You can find instructions for how to install, upgrade and downgrade the CLI using Homebrew [here](https://github.com/databricks/homebrew-tap/blob/main/README.md). + +------ +### Docker +You can use the CLI via a Docker image by pulling the image from `ghcr.io`. You can find all available versions +at: https://github.com/databricks/cli/pkgs/container/cli. +``` +docker pull ghcr.io/databricks/cli:latest +``` + +Example of how to run the CLI using the Docker image. More documentation is available at https://docs.databricks.com/dev-tools/bundles/airgapped-environment.html. +``` +docker run -e DATABRICKS_HOST=$YOUR_HOST_URL -e DATABRICKS_TOKEN=$YOUR_TOKEN ghcr.io/databricks/cli:latest current-user me +``` ## Authentication diff --git a/bundle/artifacts/all.go b/bundle/artifacts/all.go index 1a1661e5f..305193e2e 100644 --- a/bundle/artifacts/all.go +++ b/bundle/artifacts/all.go @@ -7,6 +7,7 @@ import ( "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "golang.org/x/exp/maps" ) @@ -21,7 +22,7 @@ func (m *all) Name() string { return fmt.Sprintf("artifacts.%sAll", m.name) } -func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *all) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { var out []bundle.Mutator // Iterate with stable ordering. 
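All of the mutators touched in this change move from returning `error` to returning `diag.Diagnostics`. A minimal sketch of a mutator under the new signature (an illustration, not part of this diff; the mutator name is made up):

```
package example

import (
	"context"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/diag"
)

// noopCheck is a hypothetical mutator used only to illustrate the
// diag.Diagnostics-based Apply signature.
type noopCheck struct{}

// Compile-time check that the sketch satisfies the mutator interface.
var _ bundle.Mutator = (*noopCheck)(nil)

func (m *noopCheck) Name() string {
	return "example.NoopCheck"
}

func (m *noopCheck) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
	if b.Config.Bundle.Name == "" {
		// diag.Errorf replaces the old fmt.Errorf + "return err" pattern.
		return diag.Errorf("bundle name is not set")
	}
	return nil
}
```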
@@ -31,7 +32,7 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error { for _, name := range keys { m, err := m.fn(name) if err != nil { - return err + return diag.FromErr(err) } if m != nil { out = append(out, m) diff --git a/bundle/artifacts/artifacts.go b/bundle/artifacts/artifacts.go index 76d29f56c..15565cd60 100644 --- a/bundle/artifacts/artifacts.go +++ b/bundle/artifacts/artifacts.go @@ -8,13 +8,17 @@ import ( "os" "path" "path/filepath" + "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" ) type mutatorFactory = func(name string) bundle.Mutator @@ -56,17 +60,17 @@ func (m *basicBuild) Name() string { return fmt.Sprintf("artifacts.Build(%s)", m.name) } -func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("build for %s failed, error: %w, output: %s", m.name, err, out) + return diag.Errorf("build for %s failed, error: %v, output: %s", m.name, err, out) } log.Infof(ctx, "Build succeeded") @@ -86,54 +90,139 @@ func (m *basicUpload) Name() string { return fmt.Sprintf("artifacts.Upload(%s)", m.name) } -func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } if len(artifact.Files) == 0 { - return fmt.Errorf("artifact source is not configured: %s", m.name) + return diag.Errorf("artifact source is not configured: %s", m.name) } uploadPath, err := getUploadBasePath(b) if err != nil { - return err + return diag.FromErr(err) } - client, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), uploadPath) + client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) if err != nil { - return err + return diag.FromErr(err) } - err = uploadArtifact(ctx, artifact, uploadPath, client) + err = uploadArtifact(ctx, b, artifact, uploadPath, client) if err != nil { - return fmt.Errorf("upload for %s failed, error: %w", m.name, err) + return diag.Errorf("upload for %s failed, error: %v", m.name, err) } return nil } -func uploadArtifact(ctx context.Context, a *config.Artifact, uploadPath string, client filer.Filer) error { +func getFilerForArtifacts(w *databricks.WorkspaceClient, uploadPath string) (filer.Filer, error) { + if isVolumesPath(uploadPath) { + return filer.NewFilesClient(w, uploadPath) + } + return filer.NewWorkspaceFilesClient(w, uploadPath) +} + +func isVolumesPath(path string) bool { + return strings.HasPrefix(path, "/Volumes/") +} + +func uploadArtifact(ctx context.Context, b *bundle.Bundle, a *config.Artifact, uploadPath string, client filer.Filer) error { for i := range a.Files { f := &a.Files[i] - if f.NeedsUpload() { - filename := filepath.Base(f.Source) - 
cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) - err := uploadArtifactFile(ctx, f.Source, client) - if err != nil { - return err - } - log.Infof(ctx, "Upload succeeded") - f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) + filename := filepath.Base(f.Source) + cmdio.LogString(ctx, fmt.Sprintf("Uploading %s...", filename)) + + err := uploadArtifactFile(ctx, f.Source, client) + if err != nil { + return err + } + + log.Infof(ctx, "Upload succeeded") + f.RemotePath = path.Join(uploadPath, filepath.Base(f.Source)) + remotePath := f.RemotePath + + if !strings.HasPrefix(f.RemotePath, "/Workspace/") && !strings.HasPrefix(f.RemotePath, "/Volumes/") { + wsfsBase := "/Workspace" + remotePath = path.Join(wsfsBase, f.RemotePath) + } + + for _, job := range b.Config.Resources.Jobs { + rewriteArtifactPath(b, f, job, remotePath) } } - a.NormalisePaths() return nil } +func rewriteArtifactPath(b *bundle.Bundle, f *config.ArtifactFile, job *resources.Job, remotePath string) { + // Rewrite artifact path in job task libraries + for i := range job.Tasks { + task := &job.Tasks[i] + for j := range task.Libraries { + lib := &task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath + } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath + } + } + + // Rewrite artifact path in job task libraries for ForEachTask + if task.ForEachTask != nil { + forEachTask := task.ForEachTask + for j := range forEachTask.Task.Libraries { + lib := &forEachTask.Task.Libraries[j] + if lib.Whl != "" && isArtifactMatchLibrary(f, lib.Whl, b) { + lib.Whl = remotePath + } + if lib.Jar != "" && isArtifactMatchLibrary(f, lib.Jar, b) { + lib.Jar = remotePath + } + } + } + } + + // Rewrite artifact path in job environments + for i := range job.Environments { + env := &job.Environments[i] + if env.Spec == nil { + continue + } + + for j := range env.Spec.Dependencies { + lib := env.Spec.Dependencies[j] + if isArtifactMatchLibrary(f, lib, b) { + env.Spec.Dependencies[j] = remotePath + } + } + } +} + +func isArtifactMatchLibrary(f *config.ArtifactFile, libPath string, b *bundle.Bundle) bool { + if !filepath.IsAbs(libPath) { + libPath = filepath.Join(b.RootPath, libPath) + } + + // libPath can be a glob pattern, so do the match first + matches, err := filepath.Glob(libPath) + if err != nil { + return false + } + + for _, m := range matches { + if m == f.Source { + return true + } + } + + return false +} + // Function to upload artifact file to Workspace func uploadArtifactFile(ctx context.Context, file string, client filer.Filer) error { raw, err := os.ReadFile(file) diff --git a/bundle/artifacts/artifacts_test.go b/bundle/artifacts/artifacts_test.go new file mode 100644 index 000000000..6d85f3af9 --- /dev/null +++ b/bundle/artifacts/artifacts_test.go @@ -0,0 +1,196 @@ +package artifacts + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestArtifactUploadForWorkspace(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + 
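The remote path handling above reduces to one rule: once an artifact file is uploaded, any remote path that is not already under `/Workspace/` or `/Volumes/` is anchored under `/Workspace` before it is written back into job libraries and environment dependencies. A condensed sketch of that rule, with a hypothetical helper name:

```
package example

import (
	"path"
	"strings"
)

// normalizeRemotePath condenses the prefixing logic from uploadArtifact above:
// remote paths gain a /Workspace prefix unless they already live under
// /Workspace or point into a Unity Catalog volume under /Volumes.
func normalizeRemotePath(remotePath string) string {
	if strings.HasPrefix(remotePath, "/Workspace/") || strings.HasPrefix(remotePath, "/Volumes/") {
		return remotePath
	}
	return path.Join("/Workspace", remotePath)
}
```

So an `artifact_path` of `/foo/bar/artifacts` yields library paths under `/Workspace/foo/bar/artifacts`, which is exactly what `TestArtifactUploadForWorkspace` asserts.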
testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + filepath.Join("whl", "source.whl"), + "/Workspace/Users/foo@bar.com/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + artifact := b.Config.Artifacts["whl"] + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*bytes.Reader"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + err := uploadArtifact(context.Background(), b, artifact, "/foo/bar/artifacts", mockFiler) + require.NoError(t, err) + + // Test that libraries path is updated + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Workspace/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Workspace/Users/foo@bar.com/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) +} + +func TestArtifactUploadForVolumes(t *testing.T) { + tmpDir := t.TempDir() + whlFolder := filepath.Join(tmpDir, "whl") + testutil.Touch(t, whlFolder, "source.whl") + whlLocalPath := filepath.Join(whlFolder, "source.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Workspace: config.Workspace{ + ArtifactPath: "/Volumes/foo/bar/artifacts", + }, + Artifacts: config.Artifacts{ + "whl": { + Type: config.ArtifactPythonWheel, + Files: []config.ArtifactFile{ + {Source: whlLocalPath}, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + { + ForEachTask: &jobs.ForEachTask{ + Task: jobs.Task{ + Libraries: []compute.Library{ + { + Whl: filepath.Join("whl", "*.whl"), + }, + { + Whl: "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + 
filepath.Join("whl", "source.whl"), + "/Volumes/some/path/mywheel.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + artifact := b.Config.Artifacts["whl"] + mockFiler := mockfiler.NewMockFiler(t) + mockFiler.EXPECT().Write( + mock.Anything, + filepath.Join("source.whl"), + mock.AnythingOfType("*bytes.Reader"), + filer.OverwriteIfExists, + filer.CreateParentDirectories, + ).Return(nil) + + err := uploadArtifact(context.Background(), b, artifact, "/Volumes/foo/bar/artifacts", mockFiler) + require.NoError(t, err) + + // Test that libraries path is updated + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[0].Libraries[1].Whl) + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + require.Equal(t, "/Volumes/foo/bar/artifacts/source.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[0].Whl) + require.Equal(t, "/Volumes/some/path/mywheel.whl", b.Config.Resources.Jobs["job"].JobSettings.Tasks[1].ForEachTask.Task.Libraries[1].Whl) +} diff --git a/bundle/artifacts/autodetect.go b/bundle/artifacts/autodetect.go index 6e80ef0b6..0e94edd82 100644 --- a/bundle/artifacts/autodetect.go +++ b/bundle/artifacts/autodetect.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -19,7 +20,7 @@ func (m *autodetect) Name() string { return "artifacts.DetectPackages" } -func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // If artifacts section explicitly defined, do not try to auto detect packages if b.Config.Artifacts != nil { log.Debugf(ctx, "artifacts block is defined, skipping auto-detecting") diff --git a/bundle/artifacts/build.go b/bundle/artifacts/build.go index 6b1aac822..c8c3bf67c 100644 --- a/bundle/artifacts/build.go +++ b/bundle/artifacts/build.go @@ -6,6 +6,8 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) func BuildAll() bundle.Mutator { @@ -27,10 +29,19 @@ func (m *build) Name() string { return fmt.Sprintf("artifacts.Build(%s)", m.name) } -func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) + } + + // Check if source paths are absolute, if not, make them absolute + for k := range artifact.Files { + f := &artifact.Files[k] + if !filepath.IsAbs(f.Source) { + dirPath := filepath.Dir(artifact.ConfigFilePath) + f.Source = filepath.Join(dirPath, f.Source) + } } // Skip building if build command is not specified or infered @@ -38,19 +49,59 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { // If no build command was specified or infered and there is no // artifact output files specified, artifact is misconfigured if len(artifact.Files) == 0 { - 
return fmt.Errorf("misconfigured artifact: please specify 'build' or 'files' property") + return diag.Errorf("misconfigured artifact: please specify 'build' or 'files' property") } - return nil + + // We can skip calling build mutator if there is no build command + // But we still need to expand glob references in files source path. + diags := expandGlobReference(artifact) + return diags } // If artifact path is not provided, use bundle root dir if artifact.Path == "" { - artifact.Path = b.Config.Path + artifact.Path = b.RootPath } if !filepath.IsAbs(artifact.Path) { - artifact.Path = filepath.Join(b.Config.Path, artifact.Path) + dirPath := filepath.Dir(artifact.ConfigFilePath) + artifact.Path = filepath.Join(dirPath, artifact.Path) } - return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name)) + diags := bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name)) + if diags.HasError() { + return diags + } + + // We need to expand glob reference after build mutator is applied because + // if we do it before, any files that are generated by build command will + // not be included into artifact.Files and thus will not be uploaded. + d := expandGlobReference(artifact) + return diags.Extend(d) +} + +func expandGlobReference(artifact *config.Artifact) diag.Diagnostics { + var diags diag.Diagnostics + + // Expand any glob reference in files source path + files := make([]config.ArtifactFile, 0, len(artifact.Files)) + for _, f := range artifact.Files { + matches, err := filepath.Glob(f.Source) + if err != nil { + return diags.Extend(diag.Errorf("unable to find files for %s: %v", f.Source, err)) + } + + if len(matches) == 0 { + return diags.Extend(diag.Errorf("no files found for %s", f.Source)) + } + + for _, match := range matches { + files = append(files, config.ArtifactFile{ + Source: match, + }) + } + } + + artifact.Files = files + return diags } diff --git a/bundle/artifacts/infer.go b/bundle/artifacts/infer.go index ade5def51..abc509107 100644 --- a/bundle/artifacts/infer.go +++ b/bundle/artifacts/infer.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts/whl" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) var inferMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{ @@ -41,10 +42,10 @@ func (m *infer) Name() string { return fmt.Sprintf("artifacts.Infer(%s)", m.name) } -func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } // only try to infer command if it's not already defined diff --git a/bundle/artifacts/upload.go b/bundle/artifacts/upload.go index 990718aa4..3af50021e 100644 --- a/bundle/artifacts/upload.go +++ b/bundle/artifacts/upload.go @@ -5,7 +5,9 @@ import ( "fmt" "github.com/databricks/cli/bundle" - "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" ) func UploadAll() bundle.Mutator { @@ -31,14 +33,14 @@ func (m *upload) Name() string { return fmt.Sprintf("artifacts.Upload(%s)", m.name) } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := 
b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) } if len(artifact.Files) == 0 { - return fmt.Errorf("artifact source is not configured: %s", m.name) + return diag.Errorf("artifact source is not configured: %s", m.name) } return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name)) @@ -50,20 +52,26 @@ func (m *cleanUp) Name() string { return "artifacts.CleanUp" } -func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { uploadPath, err := getUploadBasePath(b) if err != nil { - return err + return diag.FromErr(err) } - b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{ - Path: uploadPath, - Recursive: true, - }) - - err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath) + client, err := getFilerForArtifacts(b.WorkspaceClient(), uploadPath) if err != nil { - return fmt.Errorf("unable to create directory for %s: %w", uploadPath, err) + return diag.FromErr(err) + } + + // We intentionally ignore the error because it is not critical to the deployment + err = client.Delete(ctx, ".", filer.DeleteRecursively) + if err != nil { + log.Errorf(ctx, "failed to delete %s: %v", uploadPath, err) + } + + err = client.Mkdir(ctx, ".") + if err != nil { + return diag.Errorf("unable to create directory for %s: %v", uploadPath, err) } return nil diff --git a/bundle/artifacts/upload_test.go b/bundle/artifacts/upload_test.go new file mode 100644 index 000000000..cf08843a7 --- /dev/null +++ b/bundle/artifacts/upload_test.go @@ -0,0 +1,109 @@ +package artifacts + +import ( + "context" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/testfile" + "github.com/stretchr/testify/require" +) + +type noop struct{} + +func (n *noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics { + return nil +} + +func (n *noop) Name() string { + return "noop" +} + +func TestExpandGlobFilesSource(t *testing.T) { + rootPath := t.TempDir() + err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) + require.NoError(t, err) + + t1 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar1.jar")) + t1.Close(t) + + t2 := testfile.CreateFile(t, filepath.Join(rootPath, "test", "myjar2.jar")) + t2.Close(t) + + b := &bundle.Bundle{ + RootPath: rootPath, + Config: config.Root{ + Artifacts: map[string]*config.Artifact{ + "test": { + Type: "custom", + Files: []config.ArtifactFile{ + { + Source: filepath.Join("..", "test", "*.jar"), + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) + + u := &upload{"test"} + uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + bm := &build{"test"} + buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u)) + require.NoError(t, diags.Error()) + + require.Equal(t, 2, len(b.Config.Artifacts["test"].Files)) + require.Equal(t, filepath.Join(rootPath, "test", "myjar1.jar"), b.Config.Artifacts["test"].Files[0].Source) + require.Equal(t, filepath.Join(rootPath, "test", "myjar2.jar"), b.Config.Artifacts["test"].Files[1].Source) +} + +func 
TestExpandGlobFilesSourceWithNoMatches(t *testing.T) { + rootPath := t.TempDir() + err := os.Mkdir(filepath.Join(rootPath, "test"), 0755) + require.NoError(t, err) + + b := &bundle.Bundle{ + RootPath: rootPath, + Config: config.Root{ + Artifacts: map[string]*config.Artifact{ + "test": { + Type: "custom", + Files: []config.ArtifactFile{ + { + Source: filepath.Join("..", "test", "myjar.jar"), + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, ".", filepath.Join(rootPath, "resources", "artifacts.yml")) + + u := &upload{"test"} + uploadMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + bm := &build{"test"} + buildMutators[config.ArtifactType("custom")] = func(name string) bundle.Mutator { + return &noop{} + } + + diags := bundle.Apply(context.Background(), b, bundle.Seq(bm, u)) + require.ErrorContains(t, diags.Error(), "no files found for") +} diff --git a/bundle/artifacts/whl/autodetect.go b/bundle/artifacts/whl/autodetect.go index c858a38c0..ee77fff01 100644 --- a/bundle/artifacts/whl/autodetect.go +++ b/bundle/artifacts/whl/autodetect.go @@ -11,6 +11,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -25,7 +26,7 @@ func (m *detectPkg) Name() string { return "artifacts.whl.AutoDetect" } -func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b) if len(wheelTasks) == 0 { log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect") @@ -34,23 +35,23 @@ func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Detecting Python wheel project...") // checking if there is setup.py in the bundle root - setupPy := filepath.Join(b.Config.Path, "setup.py") + setupPy := filepath.Join(b.RootPath, "setup.py") _, err := os.Stat(setupPy) if err != nil { log.Infof(ctx, "No Python wheel project found at bundle root folder") return nil } - log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.Config.Path)) + log.Infof(ctx, fmt.Sprintf("Found Python wheel project at %s", b.RootPath)) module := extractModuleName(setupPy) if b.Config.Artifacts == nil { b.Config.Artifacts = make(map[string]*config.Artifact) } - pkgPath, err := filepath.Abs(b.Config.Path) + pkgPath, err := filepath.Abs(b.RootPath) if err != nil { - return err + return diag.FromErr(err) } b.Config.Artifacts[module] = &config.Artifact{ Path: pkgPath, diff --git a/bundle/artifacts/whl/build.go b/bundle/artifacts/whl/build.go index aeec31a63..992ade297 100644 --- a/bundle/artifacts/whl/build.go +++ b/bundle/artifacts/whl/build.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/python" ) @@ -27,10 +28,10 @@ func (m *build) Name() string { return fmt.Sprintf("artifacts.whl.Build(%s)", m.name) } -func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *build) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact, ok := b.Config.Artifacts[m.name] if !ok { - return fmt.Errorf("artifact doesn't exist: %s", m.name) + return diag.Errorf("artifact doesn't exist: %s", m.name) 
} cmdio.LogString(ctx, fmt.Sprintf("Building %s...", m.name)) @@ -43,13 +44,13 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error { out, err := artifact.Build(ctx) if err != nil { - return fmt.Errorf("build failed %s, error: %w, output: %s", m.name, err, out) + return diag.Errorf("build failed %s, error: %v, output: %s", m.name, err, out) } log.Infof(ctx, "Build succeeded") wheels := python.FindFilesWithSuffixInPath(distPath, ".whl") if len(wheels) == 0 { - return fmt.Errorf("cannot find built wheel in %s for package %s", dir, m.name) + return diag.Errorf("cannot find built wheel in %s for package %s", dir, m.name) } for _, wheel := range wheels { artifact.Files = append(artifact.Files, config.ArtifactFile{ diff --git a/bundle/artifacts/whl/from_libraries.go b/bundle/artifacts/whl/from_libraries.go index 9d35f6314..ad321557c 100644 --- a/bundle/artifacts/whl/from_libraries.go +++ b/bundle/artifacts/whl/from_libraries.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -20,7 +21,7 @@ func (m *fromLibraries) Name() string { return "artifacts.whl.DefineArtifactsFromLibraries" } -func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error { +func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if len(b.Config.Artifacts) != 0 { log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined") return nil @@ -29,24 +30,18 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error { tasks := libraries.FindAllWheelTasksWithLocalLibraries(b) for _, task := range tasks { for _, lib := range task.Libraries { - matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl)) - // File referenced from libraries section does not exists, skipping - if err != nil { - continue - } + matchAndAdd(ctx, lib.Whl, b) + } + } - for _, match := range matches { - name := filepath.Base(match) - if b.Config.Artifacts == nil { - b.Config.Artifacts = make(map[string]*config.Artifact) - } - - log.Debugf(ctx, "Adding an artifact block for %s", match) - b.Config.Artifacts[name] = &config.Artifact{ - Files: []config.ArtifactFile{ - {Source: match}, - }, - Type: config.ArtifactPythonWheel, + envs := libraries.FindAllEnvironments(b) + for _, jobEnvs := range envs { + for _, env := range jobEnvs { + if env.Spec != nil { + for _, dep := range env.Spec.Dependencies { + if libraries.IsEnvironmentDependencyLocal(dep) { + matchAndAdd(ctx, dep, b) + } } } } @@ -54,3 +49,26 @@ func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error { return nil } + +func matchAndAdd(ctx context.Context, lib string, b *bundle.Bundle) { + matches, err := filepath.Glob(filepath.Join(b.RootPath, lib)) + // File referenced from libraries section does not exists, skipping + if err != nil { + return + } + + for _, match := range matches { + name := filepath.Base(match) + if b.Config.Artifacts == nil { + b.Config.Artifacts = make(map[string]*config.Artifact) + } + + log.Debugf(ctx, "Adding an artifact block for %s", match) + b.Config.Artifacts[name] = &config.Artifact{ + Files: []config.ArtifactFile{ + {Source: match}, + }, + Type: config.ArtifactPythonWheel, + } + } +} diff --git a/bundle/artifacts/whl/infer.go b/bundle/artifacts/whl/infer.go index dc2b8e233..dd4ad2956 100644 --- a/bundle/artifacts/whl/infer.go +++ 
b/bundle/artifacts/whl/infer.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/python" ) @@ -12,11 +13,11 @@ type infer struct { name string } -func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { artifact := b.Config.Artifacts[m.name] py, err := python.DetectExecutable(ctx) if err != nil { - return err + return diag.FromErr(err) } // Note: using --build-number (build tag) flag does not help with re-installing diff --git a/bundle/bundle.go b/bundle/bundle.go index 9e21cb561..032d98abc 100644 --- a/bundle/bundle.go +++ b/bundle/bundle.go @@ -16,12 +16,13 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" "github.com/databricks/cli/bundle/metadata" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/tags" "github.com/databricks/cli/libs/terraform" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" sdkconfig "github.com/databricks/databricks-sdk-go/config" "github.com/hashicorp/terraform-exec/tfexec" @@ -30,6 +31,14 @@ import ( const internalFolder = ".internal" type Bundle struct { + // RootPath contains the directory path to the root of the bundle. + // It is set when we instantiate a new bundle instance. + RootPath string + + // BundleRoot is a virtual filesystem path to the root of the bundle. + // Exclusively use this field for filesystem operations. + BundleRoot vfs.Path + Config config.Root // Metadata about the bundle deployment. This is the interface Databricks services @@ -45,6 +54,9 @@ type Bundle struct { clientOnce sync.Once client *databricks.WorkspaceClient + // Files that are synced to the workspace.file_path + Files []fileset.File + // Stores an initialized copy of this bundle's Terraform wrapper. Terraform *tfexec.Terraform @@ -63,33 +75,15 @@ type Bundle struct { } func Load(ctx context.Context, path string) (*Bundle, error) { - b := &Bundle{} - stat, err := os.Stat(path) - if err != nil { - return nil, err + b := &Bundle{ + RootPath: filepath.Clean(path), + BundleRoot: vfs.MustNew(path), } configFile, err := config.FileNames.FindInPath(path) if err != nil { - _, hasRootEnv := env.Root(ctx) - _, hasIncludesEnv := env.Includes(ctx) - if hasRootEnv && hasIncludesEnv && stat.IsDir() { - log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path) - b.Config = config.Root{ - Path: path, - Bundle: config.Bundle{ - Name: filepath.Base(path), - }, - } - return b, nil - } return nil, err } - log.Debugf(ctx, "Loading bundle configuration from: %s", configFile) - root, err := config.Load(configFile) - if err != nil { - return nil, err - } - b.Config = *root + log.Debugf(ctx, "Found bundle root at %s (file %s)", b.RootPath, configFile) return b, nil } @@ -158,7 +152,7 @@ func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) if !exists || cacheDirName == "" { cacheDirName = filepath.Join( // Anchor at bundle root directory. - b.Config.Path, + b.RootPath, // Static cache directory. 
".databricks", "bundle", @@ -210,7 +204,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { if err != nil { return nil, err } - internalDirRel, err := filepath.Rel(b.Config.Path, internalDir) + internalDirRel, err := filepath.Rel(b.RootPath, internalDir) if err != nil { return nil, err } @@ -218,12 +212,12 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { } func (b *Bundle) GitRepository() (*git.Repository, error) { - rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git") + _, err := vfs.FindLeafInTree(b.BundleRoot, ".git") if err != nil { return nil, fmt.Errorf("unable to locate repository root: %w", err) } - return git.NewRepository(rootPath) + return git.NewRepository(b.BundleRoot) } // AuthEnv returns a map with environment variables and their values diff --git a/bundle/bundle_read_only.go b/bundle/bundle_read_only.go new file mode 100644 index 000000000..59084f2ac --- /dev/null +++ b/bundle/bundle_read_only.go @@ -0,0 +1,41 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go" +) + +type ReadOnlyBundle struct { + b *Bundle +} + +func ReadOnly(b *Bundle) ReadOnlyBundle { + return ReadOnlyBundle{b: b} +} + +func (r ReadOnlyBundle) Config() config.Root { + return r.b.Config +} + +func (r ReadOnlyBundle) RootPath() string { + return r.b.RootPath +} + +func (r ReadOnlyBundle) BundleRoot() vfs.Path { + return r.b.BundleRoot +} + +func (r ReadOnlyBundle) WorkspaceClient() *databricks.WorkspaceClient { + return r.b.WorkspaceClient() +} + +func (r ReadOnlyBundle) CacheDir(ctx context.Context, paths ...string) (string, error) { + return r.b.CacheDir(ctx, paths...) 
+} + +func (r ReadOnlyBundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) { + return r.b.GetSyncIncludePatterns(ctx) +} diff --git a/bundle/bundle_test.go b/bundle/bundle_test.go index 43477efd1..a29aa024b 100644 --- a/bundle/bundle_test.go +++ b/bundle/bundle_test.go @@ -2,25 +2,28 @@ package bundle import ( "context" + "errors" + "io/fs" "os" "path/filepath" "testing" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestLoadNotExists(t *testing.T) { b, err := Load(context.Background(), "/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) assert.Nil(t, b) } func TestLoadExists(t *testing.T) { b, err := Load(context.Background(), "./tests/basic") - require.Nil(t, err) - assert.Equal(t, "basic", b.Config.Bundle.Name) + assert.NoError(t, err) + assert.NotNil(t, b) } func TestBundleCacheDir(t *testing.T) { @@ -76,7 +79,7 @@ func TestBundleMustLoadSuccess(t *testing.T) { t.Setenv(env.RootVariable, "./tests/basic") b, err := MustLoad(context.Background()) require.NoError(t, err) - assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) + assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath)) } func TestBundleMustLoadFailureWithEnv(t *testing.T) { @@ -86,7 +89,7 @@ func TestBundleMustLoadFailureWithEnv(t *testing.T) { } func TestBundleMustLoadFailureIfNotFound(t *testing.T) { - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) _, err := MustLoad(context.Background()) require.Error(t, err, "unable to find bundle root") } @@ -95,7 +98,7 @@ func TestBundleTryLoadSuccess(t *testing.T) { t.Setenv(env.RootVariable, "./tests/basic") b, err := TryLoad(context.Background()) require.NoError(t, err) - assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path)) + assert.Equal(t, "tests/basic", filepath.ToSlash(b.RootPath)) } func TestBundleTryLoadFailureWithEnv(t *testing.T) { @@ -105,7 +108,7 @@ func TestBundleTryLoadFailureWithEnv(t *testing.T) { } func TestBundleTryLoadOkIfNotFound(t *testing.T) { - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) b, err := TryLoad(context.Background()) assert.NoError(t, err) assert.Nil(t, b) diff --git a/bundle/config/artifact.go b/bundle/config/artifact.go index 279a8f3b7..219def571 100644 --- a/bundle/config/artifact.go +++ b/bundle/config/artifact.go @@ -3,18 +3,16 @@ package config import ( "context" "fmt" - "path" "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/libs/exec" - "github.com/databricks/databricks-sdk-go/service/compute" ) type Artifacts map[string]*Artifact -func (artifacts Artifacts) SetConfigFilePath(path string) { +func (artifacts Artifacts) ConfigureConfigFilePath() { for _, artifact := range artifacts { - artifact.ConfigFilePath = path + artifact.ConfigureConfigFilePath() } } @@ -23,9 +21,8 @@ type ArtifactType string const ArtifactPythonWheel ArtifactType = `whl` type ArtifactFile struct { - Source string `json:"source"` - RemotePath string `json:"-" bundle:"readonly"` - Libraries []*compute.Library `json:"-" bundle:"readonly"` + Source string `json:"source"` + RemotePath string `json:"remote_path" bundle:"readonly"` } // Artifact defines a single local code artifact that can be @@ -65,36 +62,3 @@ func (a *Artifact) Build(ctx context.Context) ([]byte, error) { } return e.Exec(ctx, a.BuildCommand) } - -func (a *Artifact) NormalisePaths() { - for _, f := range a.Files { - // If no libraries attached, nothing to 
normalise, skipping - if f.Libraries == nil { - continue - } - - wsfsBase := "/Workspace" - remotePath := path.Join(wsfsBase, f.RemotePath) - for i := range f.Libraries { - lib := f.Libraries[i] - if lib.Whl != "" { - lib.Whl = remotePath - continue - } - if lib.Jar != "" { - lib.Jar = remotePath - continue - } - } - - } -} - -// This function determines if artifact files needs to be uploaded. -// During the bundle processing we analyse which library uses which artifact file. -// If artifact file is used as a library, we store the reference to this library in artifact file Libraries field. -// If artifact file has libraries it's been used in, it means than we need to upload this file. -// Otherwise this artifact file is not used and we skip uploading -func (af *ArtifactFile) NeedsUpload() bool { - return af.Libraries != nil -} diff --git a/bundle/config/bundle.go b/bundle/config/bundle.go index 933e88bfa..78648dfd7 100644 --- a/bundle/config/bundle.go +++ b/bundle/config/bundle.go @@ -25,9 +25,6 @@ type Bundle struct { // For example, where to find the binary, which version to use, etc. Terraform *Terraform `json:"terraform,omitempty" bundle:"readonly"` - // Lock configures locking behavior on deployment. - Lock Lock `json:"lock" bundle:"readonly"` - // Force-override Git branch validation. Force bool `json:"force,omitempty" bundle:"readonly"` @@ -43,4 +40,10 @@ type Bundle struct { // Overrides the compute used for jobs and other supported assets. ComputeID string `json:"compute_id,omitempty"` + + // Deployment section specifies deployment related configuration for bundle + Deployment Deployment `json:"deployment,omitempty"` + + // Databricks CLI version constraints required to run the bundle. + DatabricksCliVersion string `json:"databricks_cli_version,omitempty"` } diff --git a/bundle/config/deployment.go b/bundle/config/deployment.go new file mode 100644 index 000000000..7f0f57a8c --- /dev/null +++ b/bundle/config/deployment.go @@ -0,0 +1,10 @@ +package config + +type Deployment struct { + // FailOnActiveRuns specifies whether to fail the deployment if there are + // running jobs or pipelines in the workspace. Defaults to false. + FailOnActiveRuns bool `json:"fail_on_active_runs,omitempty"` + + // Lock configures locking behavior on deployment. + Lock Lock `json:"lock"` +} diff --git a/bundle/config/experimental.go b/bundle/config/experimental.go index 62d1ae731..12048a322 100644 --- a/bundle/config/experimental.go +++ b/bundle/config/experimental.go @@ -10,6 +10,35 @@ type Experimental struct { // In this case the configured wheel task will be deployed as a notebook task which install defined wheel in runtime and executes it. // For more details see https://github.com/databricks/cli/pull/797 and https://github.com/databricks/cli/pull/635 PythonWheelWrapper bool `json:"python_wheel_wrapper,omitempty"` + + // Enable legacy run_as behavior. That is: + // - Set the run_as identity as the owner of any pipelines in the bundle. + // - Do not error in the presence of resources that do not support run_as. + // As of April 2024 this includes pipelines and model serving endpoints. + // + // This mode of run_as requires the deploying user to be a workspace and metastore + // admin. Use of this flag is not recommend for new bundles, and it is only provided + // to unblock customers that are stuck due to breaking changes in the run_as behavior + // made in https://github.com/databricks/cli/pull/1233. 
This flag might + // be removed in the future once we have a proper workaround like allowing IS_OWNER + // as a top-level permission in the DAB. + UseLegacyRunAs bool `json:"use_legacy_run_as,omitempty"` + + // PyDABs determines whether to load the 'databricks-pydabs' package. + // + // PyDABs allows to define bundle configuration using Python. + PyDABs PyDABs `json:"pydabs,omitempty"` +} + +type PyDABs struct { + // Enabled is a flag to enable the feature. + Enabled bool `json:"enabled,omitempty"` + + // VEnvPath is path to the virtual environment. + // + // Required if PyDABs is enabled. PyDABs will load the code in the specified + // environment. + VEnvPath string `json:"venv_path,omitempty"` } type Command string diff --git a/bundle/config/generate/job.go b/bundle/config/generate/job.go index 469f84228..28bc86412 100644 --- a/bundle/config/generate/job.go +++ b/bundle/config/generate/job.go @@ -17,12 +17,12 @@ func ConvertJobToValue(job *jobs.Job) (dyn.Value, error) { for _, task := range job.Settings.Tasks { v, err := convertTaskToValue(task, taskOrder) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } tasks = append(tasks, v) } // We're using location lines to define the order of keys in exported YAML. - value["tasks"] = dyn.NewValue(tasks, dyn.Location{Line: jobOrder.Get("tasks")}) + value["tasks"] = dyn.NewValue(tasks, []dyn.Location{{Line: jobOrder.Get("tasks")}}) } return yamlsaver.ConvertToMapValue(job.Settings, jobOrder, []string{"format", "new_cluster", "existing_cluster_id"}, value) diff --git a/bundle/config/git.go b/bundle/config/git.go index 58a5d54d2..f9f2f83e5 100644 --- a/bundle/config/git.go +++ b/bundle/config/git.go @@ -9,8 +9,8 @@ type Git struct { BundleRootPath string `json:"bundle_root_path,omitempty" bundle:"readonly"` // Inferred is set to true if the Git details were inferred and weren't set explicitly - Inferred bool `json:"-" bundle:"readonly"` + Inferred bool `json:"inferred,omitempty" bundle:"readonly"` // The actual branch according to Git (may be different from the configured branch) - ActualBranch string `json:"-" bundle:"readonly"` + ActualBranch string `json:"actual_branch,omitempty" bundle:"readonly"` } diff --git a/bundle/config/interpolation/interpolation.go b/bundle/config/interpolation/interpolation.go deleted file mode 100644 index 8ba0b8b1f..000000000 --- a/bundle/config/interpolation/interpolation.go +++ /dev/null @@ -1,254 +0,0 @@ -package interpolation - -import ( - "context" - "errors" - "fmt" - "reflect" - "regexp" - "sort" - "strings" - - "slices" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/variable" - "golang.org/x/exp/maps" -) - -const Delimiter = "." - -// must start with alphabet, support hyphens and underscores in middle but must end with character -var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`) - -type stringField struct { - path string - - getter - setter -} - -func newStringField(path string, g getter, s setter) *stringField { - return &stringField{ - path: path, - - getter: g, - setter: s, - } -} - -func (s *stringField) dependsOn() []string { - var out []string - m := re.FindAllStringSubmatch(s.Get(), -1) - for i := range m { - out = append(out, m[i][1]) - } - return out -} - -func (s *stringField) interpolate(fns []LookupFunction, lookup map[string]string) { - out := re.ReplaceAllStringFunc(s.Get(), func(s string) string { - // Turn the whole match into the submatch. 
- match := re.FindStringSubmatch(s) - for _, fn := range fns { - v, err := fn(match[1], lookup) - if errors.Is(err, ErrSkipInterpolation) { - continue - } - if err != nil { - panic(err) - } - return v - } - - // No substitution. - return s - }) - - s.Set(out) -} - -type accumulator struct { - // all string fields in the bundle config - strings map[string]*stringField - - // contains path -> resolved_string mapping for string fields in the config - // The resolved strings will NOT contain any variable references that could - // have been resolved, however there might still be references that cannot - // be resolved - memo map[string]string -} - -// jsonFieldName returns the name in a field's `json` tag. -// Returns the empty string if it isn't set. -func jsonFieldName(sf reflect.StructField) string { - tag, ok := sf.Tag.Lookup("json") - if !ok { - return "" - } - parts := strings.Split(tag, ",") - if parts[0] == "-" { - return "" - } - return parts[0] -} - -func (a *accumulator) walkStruct(scope []string, rv reflect.Value) { - num := rv.NumField() - for i := 0; i < num; i++ { - sf := rv.Type().Field(i) - f := rv.Field(i) - - // Walk field with the same scope for anonymous (embedded) fields. - if sf.Anonymous { - a.walk(scope, f, anySetter{f}) - continue - } - - // Skip unnamed fields. - fieldName := jsonFieldName(rv.Type().Field(i)) - if fieldName == "" { - continue - } - - a.walk(append(scope, fieldName), f, anySetter{f}) - } -} - -func (a *accumulator) walk(scope []string, rv reflect.Value, s setter) { - // Dereference pointer. - if rv.Type().Kind() == reflect.Pointer { - // Skip nil pointers. - if rv.IsNil() { - return - } - rv = rv.Elem() - s = anySetter{rv} - } - - switch rv.Type().Kind() { - case reflect.String: - path := strings.Join(scope, Delimiter) - a.strings[path] = newStringField(path, anyGetter{rv}, s) - - // register alias for variable value. 
`var.foo` would be the alias for - // `variables.foo.value` - if len(scope) == 3 && scope[0] == "variables" && scope[2] == "value" { - aliasPath := strings.Join([]string{variable.VariableReferencePrefix, scope[1]}, Delimiter) - a.strings[aliasPath] = a.strings[path] - } - case reflect.Struct: - a.walkStruct(scope, rv) - case reflect.Map: - if rv.Type().Key().Kind() != reflect.String { - panic("only support string keys in map") - } - keys := rv.MapKeys() - for _, key := range keys { - a.walk(append(scope, key.String()), rv.MapIndex(key), mapSetter{rv, key}) - } - case reflect.Slice: - n := rv.Len() - name := scope[len(scope)-1] - base := scope[:len(scope)-1] - for i := 0; i < n; i++ { - element := rv.Index(i) - a.walk(append(base, fmt.Sprintf("%s[%d]", name, i)), element, anySetter{element}) - } - } -} - -// walk and gather all string fields in the config -func (a *accumulator) start(v any) { - rv := reflect.ValueOf(v) - if rv.Type().Kind() != reflect.Pointer { - panic("expect pointer") - } - rv = rv.Elem() - if rv.Type().Kind() != reflect.Struct { - panic("expect struct") - } - - a.strings = make(map[string]*stringField) - a.memo = make(map[string]string) - a.walk([]string{}, rv, nilSetter{}) -} - -// recursively interpolate variables in a depth first manner -func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunction) error { - // return early if the path is already resolved - if _, ok := a.memo[path]; ok { - return nil - } - - // fetch the string node to resolve - field, ok := a.strings[path] - if !ok { - return fmt.Errorf("no value found for interpolation reference: ${%s}", path) - } - - // return early if the string field has no variables to interpolate - if len(field.dependsOn()) == 0 { - a.memo[path] = field.Get() - return nil - } - - // resolve all variables refered in the root string field - for _, childFieldPath := range field.dependsOn() { - // error if there is a loop in variable interpolation - if slices.Contains(seenPaths, childFieldPath) { - return fmt.Errorf("cycle detected in field resolution: %s", strings.Join(append(seenPaths, childFieldPath), " -> ")) - } - - // recursive resolve variables in the child fields - err := a.Resolve(childFieldPath, append(seenPaths, childFieldPath), fns...) - if err != nil { - return err - } - } - - // interpolate root string once all variable references in it have been resolved - field.interpolate(fns, a.memo) - - // record interpolated string in memo - a.memo[path] = field.Get() - return nil -} - -// Interpolate all string fields in the config -func (a *accumulator) expand(fns ...LookupFunction) error { - // sorting paths for stable order of iteration - paths := maps.Keys(a.strings) - sort.Strings(paths) - - // iterate over paths for all strings fields in the config - for _, path := range paths { - err := a.Resolve(path, []string{path}, fns...) - if err != nil { - return err - } - } - return nil -} - -type interpolate struct { - fns []LookupFunction -} - -func (m *interpolate) expand(v any) error { - a := accumulator{} - a.start(v) - return a.expand(m.fns...) 
-} - -func Interpolate(fns ...LookupFunction) bundle.Mutator { - return &interpolate{fns: fns} -} - -func (m *interpolate) Name() string { - return "Interpolate" -} - -func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) error { - return m.expand(&b.Config) -} diff --git a/bundle/config/interpolation/interpolation_test.go b/bundle/config/interpolation/interpolation_test.go deleted file mode 100644 index cccb6dc71..000000000 --- a/bundle/config/interpolation/interpolation_test.go +++ /dev/null @@ -1,251 +0,0 @@ -package interpolation - -import ( - "testing" - - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/variable" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type nest struct { - X string `json:"x"` - Y *string `json:"y"` - Z map[string]string `json:"z"` -} - -type foo struct { - A string `json:"a"` - B string `json:"b"` - C string `json:"c"` - - // Pointer field - D *string `json:"d"` - - // Struct field - E nest `json:"e"` - - // Map field - F map[string]string `json:"f"` -} - -func expand(v any) error { - a := accumulator{} - a.start(v) - return a.expand(DefaultLookup) -} - -func TestInterpolationVariables(t *testing.T) { - f := foo{ - A: "a", - B: "${a}", - C: "${a}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.B) - assert.Equal(t, "a", f.C) -} - -func TestInterpolationVariablesSpecialChars(t *testing.T) { - type bar struct { - A string `json:"a-b"` - B string `json:"b_c"` - C string `json:"c-_a"` - } - f := bar{ - A: "a", - B: "${a-b}", - C: "${a-b}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.B) - assert.Equal(t, "a", f.C) -} - -func TestInterpolationValidMatches(t *testing.T) { - expectedMatches := map[string]string{ - "${hello_world.world_world}": "hello_world.world_world", - "${helloworld.world-world}": "helloworld.world-world", - "${hello-world.world-world}": "hello-world.world-world", - } - for interpolationStr, expectedMatch := range expectedMatches { - match := re.FindStringSubmatch(interpolationStr) - assert.True(t, len(match) > 0, - "Failed to match %s and find %s", interpolationStr, expectedMatch) - assert.Equal(t, expectedMatch, match[1], - "Failed to match the exact pattern %s and find %s", interpolationStr, expectedMatch) - } -} - -func TestInterpolationInvalidMatches(t *testing.T) { - invalidMatches := []string{ - "${hello_world-.world_world}", // the first segment ending must not end with hyphen (-) - "${hello_world-_.world_world}", // the first segment ending must not end with underscore (_) - "${helloworld.world-world-}", // second segment must not end with hyphen (-) - "${helloworld-.world-world}", // first segment must not end with hyphen (-) - "${helloworld.-world-world}", // second segment must not start with hyphen (-) - "${-hello-world.-world-world-}", // must not start or end with hyphen (-) - "${_-_._-_.id}", // cannot use _- in sequence - "${0helloworld.world-world}", // interpolated first section shouldn't start with number - "${helloworld.9world-world}", // interpolated second section shouldn't start with number - "${a-a.a-_a-a.id}", // fails because of -_ in the second segment - "${a-a.a--a-a.id}", // fails because of -- in the second segment - } - for _, invalidMatch := range invalidMatches { - match := re.FindStringSubmatch(invalidMatch) - assert.True(t, len(match) == 0, "Should be invalid interpolation: %s", invalidMatch) - } -} - -func 
TestInterpolationWithPointers(t *testing.T) { - fd := "${a}" - f := foo{ - A: "a", - D: &fd, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", *f.D) -} - -func TestInterpolationWithStruct(t *testing.T) { - fy := "${e.x}" - f := foo{ - A: "${e.x}", - E: nest{ - X: "x", - Y: &fy, - }, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "x", f.A) - assert.Equal(t, "x", f.E.X) - assert.Equal(t, "x", *f.E.Y) -} - -func TestInterpolationWithMap(t *testing.T) { - f := foo{ - A: "${f.a}", - F: map[string]string{ - "a": "a", - "b": "${f.a}", - }, - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "a", f.F["a"]) - assert.Equal(t, "a", f.F["b"]) -} - -func TestInterpolationWithResursiveVariableReferences(t *testing.T) { - f := foo{ - A: "a", - B: "(${a})", - C: "${a} ${b}", - } - - err := expand(&f) - require.NoError(t, err) - - assert.Equal(t, "a", f.A) - assert.Equal(t, "(a)", f.B) - assert.Equal(t, "a (a)", f.C) -} - -func TestInterpolationVariableLoopError(t *testing.T) { - d := "${b}" - f := foo{ - A: "a", - B: "${c}", - C: "${d}", - D: &d, - } - - err := expand(&f) - assert.ErrorContains(t, err, "cycle detected in field resolution: b -> c -> d -> b") -} - -func TestInterpolationForVariables(t *testing.T) { - foo := "abc" - bar := "${var.foo} def" - apple := "${var.foo} ${var.bar}" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - "bar": { - Value: &bar, - }, - "apple": { - Value: &apple, - }, - }, - Bundle: config.Bundle{ - Name: "${var.apple} ${var.foo}", - }, - } - - err := expand(&config) - assert.NoError(t, err) - assert.Equal(t, "abc", *(config.Variables["foo"].Value)) - assert.Equal(t, "abc def", *(config.Variables["bar"].Value)) - assert.Equal(t, "abc abc def", *(config.Variables["apple"].Value)) - assert.Equal(t, "abc abc def abc", config.Bundle.Name) -} - -func TestInterpolationLoopForVariables(t *testing.T) { - foo := "${var.bar}" - bar := "${var.foo}" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - "bar": { - Value: &bar, - }, - }, - Bundle: config.Bundle{ - Name: "${var.foo}", - }, - } - - err := expand(&config) - assert.ErrorContains(t, err, "cycle detected in field resolution: bundle.name -> var.foo -> var.bar -> var.foo") -} - -func TestInterpolationInvalidVariableReference(t *testing.T) { - foo := "abc" - config := config.Root{ - Variables: map[string]*variable.Variable{ - "foo": { - Value: &foo, - }, - }, - Bundle: config.Bundle{ - Name: "${vars.foo}", - }, - } - - err := expand(&config) - assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}") -} diff --git a/bundle/config/interpolation/lookup.go b/bundle/config/interpolation/lookup.go deleted file mode 100644 index 3dc5047a7..000000000 --- a/bundle/config/interpolation/lookup.go +++ /dev/null @@ -1,51 +0,0 @@ -package interpolation - -import ( - "errors" - "fmt" - "slices" - "strings" -) - -// LookupFunction returns the value to rewrite a path expression to. -type LookupFunction func(path string, depends map[string]string) (string, error) - -// ErrSkipInterpolation can be used to fall through from [LookupFunction]. -var ErrSkipInterpolation = errors.New("skip interpolation") - -// DefaultLookup looks up the specified path in the map. -// It returns an error if it doesn't exist. 
-func DefaultLookup(path string, lookup map[string]string) (string, error) { - v, ok := lookup[path] - if !ok { - return "", fmt.Errorf("expected to find value for path: %s", path) - } - return v, nil -} - -func pathPrefixMatches(prefix []string, path string) bool { - parts := strings.Split(path, Delimiter) - return len(parts) >= len(prefix) && slices.Compare(prefix, parts[0:len(prefix)]) == 0 -} - -// ExcludeLookupsInPath is a lookup function that skips lookups for the specified path. -func ExcludeLookupsInPath(exclude ...string) LookupFunction { - return func(path string, lookup map[string]string) (string, error) { - if pathPrefixMatches(exclude, path) { - return "", ErrSkipInterpolation - } - - return DefaultLookup(path, lookup) - } -} - -// IncludeLookupsInPath is a lookup function that limits lookups to the specified path. -func IncludeLookupsInPath(include ...string) LookupFunction { - return func(path string, lookup map[string]string) (string, error) { - if !pathPrefixMatches(include, path) { - return "", ErrSkipInterpolation - } - - return DefaultLookup(path, lookup) - } -} diff --git a/bundle/config/interpolation/lookup_test.go b/bundle/config/interpolation/lookup_test.go deleted file mode 100644 index 61628bf04..000000000 --- a/bundle/config/interpolation/lookup_test.go +++ /dev/null @@ -1,81 +0,0 @@ -package interpolation - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -type interpolationFixture struct { - A map[string]string `json:"a"` - B map[string]string `json:"b"` - C map[string]string `json:"c"` -} - -func fixture() interpolationFixture { - return interpolationFixture{ - A: map[string]string{ - "x": "1", - }, - B: map[string]string{ - "x": "2", - }, - C: map[string]string{ - "ax": "${a.x}", - "bx": "${b.x}", - }, - } -} - -func TestExcludePath(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - ExcludeLookupsInPath("a"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "${a.x}", tmp.C["ax"]) - assert.Equal(t, "2", tmp.C["bx"]) -} - -func TestIncludePath(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - IncludeLookupsInPath("a"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "1", tmp.C["ax"]) - assert.Equal(t, "${b.x}", tmp.C["bx"]) -} - -func TestIncludePathMultiple(t *testing.T) { - tmp := fixture() - m := interpolate{ - fns: []LookupFunction{ - IncludeLookupsInPath("a"), - IncludeLookupsInPath("b"), - }, - } - - err := m.expand(&tmp) - require.NoError(t, err) - - assert.Equal(t, "1", tmp.A["x"]) - assert.Equal(t, "2", tmp.B["x"]) - assert.Equal(t, "1", tmp.C["ax"]) - assert.Equal(t, "2", tmp.C["bx"]) -} diff --git a/bundle/config/interpolation/setter.go b/bundle/config/interpolation/setter.go deleted file mode 100644 index cce39c611..000000000 --- a/bundle/config/interpolation/setter.go +++ /dev/null @@ -1,48 +0,0 @@ -package interpolation - -import "reflect" - -// String values in maps are not addressable and therefore not settable -// through Go's reflection mechanism. This interface solves this limitation -// by wrapping the setter differently for addressable values and map values. 
-type setter interface { - Set(string) -} - -type nilSetter struct{} - -func (nilSetter) Set(_ string) { - panic("nil setter") -} - -type anySetter struct { - rv reflect.Value -} - -func (s anySetter) Set(str string) { - s.rv.SetString(str) -} - -type mapSetter struct { - // map[string]string - m reflect.Value - - // key - k reflect.Value -} - -func (s mapSetter) Set(str string) { - s.m.SetMapIndex(s.k, reflect.ValueOf(str)) -} - -type getter interface { - Get() string -} - -type anyGetter struct { - rv reflect.Value -} - -func (g anyGetter) Get() string { - return g.rv.String() -} diff --git a/bundle/config/loader/entry_point.go b/bundle/config/loader/entry_point.go new file mode 100644 index 000000000..2c73a5825 --- /dev/null +++ b/bundle/config/loader/entry_point.go @@ -0,0 +1,36 @@ +package loader + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" +) + +type entryPoint struct{} + +// EntryPoint loads the entry point configuration. +func EntryPoint() bundle.Mutator { + return &entryPoint{} +} + +func (m *entryPoint) Name() string { + return "EntryPoint" +} + +func (m *entryPoint) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + path, err := config.FileNames.FindInPath(b.RootPath) + if err != nil { + return diag.FromErr(err) + } + this, diags := config.Load(path) + if diags.HasError() { + return diags + } + err = b.Config.Merge(this) + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + return diags +} diff --git a/bundle/config/loader/entry_point_test.go b/bundle/config/loader/entry_point_test.go new file mode 100644 index 000000000..80271f0b7 --- /dev/null +++ b/bundle/config/loader/entry_point_test.go @@ -0,0 +1,26 @@ +package loader_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/loader" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEntryPointNoRootPath(t *testing.T) { + b := &bundle.Bundle{} + diags := bundle.Apply(context.Background(), b, loader.EntryPoint()) + require.Error(t, diags.Error()) +} + +func TestEntryPoint(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "testdata", + } + diags := bundle.Apply(context.Background(), b, loader.EntryPoint()) + require.NoError(t, diags.Error()) + assert.Equal(t, "loader_test", b.Config.Bundle.Name) +} diff --git a/bundle/config/mutator/process_include.go b/bundle/config/loader/process_include.go similarity index 70% rename from bundle/config/mutator/process_include.go rename to bundle/config/loader/process_include.go index 350c3c49c..7cf9a17d7 100644 --- a/bundle/config/mutator/process_include.go +++ b/bundle/config/loader/process_include.go @@ -1,4 +1,4 @@ -package mutator +package loader import ( "context" @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) type processInclude struct { @@ -25,10 +26,14 @@ func (m *processInclude) Name() string { return fmt.Sprintf("ProcessInclude(%s)", m.relPath) } -func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error { - this, err := config.Load(m.fullPath) - if err != nil { - return err +func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + this, diags := config.Load(m.fullPath) + if diags.HasError() { + return diags } - return b.Config.Merge(this) + err := b.Config.Merge(this) + if err != nil { + diags = 
diags.Extend(diag.FromErr(err)) + } + return diags } diff --git a/bundle/config/mutator/process_include_test.go b/bundle/config/loader/process_include_test.go similarity index 51% rename from bundle/config/mutator/process_include_test.go rename to bundle/config/loader/process_include_test.go index 7ca5d1981..da4da9ff6 100644 --- a/bundle/config/mutator/process_include_test.go +++ b/bundle/config/loader/process_include_test.go @@ -1,38 +1,35 @@ -package mutator_test +package loader_test import ( "context" - "fmt" - "os" "path/filepath" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/loader" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestProcessInclude(t *testing.T) { b := &bundle.Bundle{ + RootPath: "testdata", Config: config.Root{ - Path: t.TempDir(), Workspace: config.Workspace{ Host: "foo", }, }, } - relPath := "./file.yml" - fullPath := filepath.Join(b.Config.Path, relPath) - f, err := os.Create(fullPath) - require.NoError(t, err) - fmt.Fprint(f, "workspace:\n host: bar\n") - f.Close() + m := loader.ProcessInclude(filepath.Join(b.RootPath, "host.yml"), "host.yml") + assert.Equal(t, "ProcessInclude(host.yml)", m.Name()) + // Assert the host value prior to applying the mutator assert.Equal(t, "foo", b.Config.Workspace.Host) - err = bundle.Apply(context.Background(), b, mutator.ProcessInclude(fullPath, relPath)) - require.NoError(t, err) + + // Apply the mutator and assert that the host value has been updated + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Workspace.Host) } diff --git a/bundle/config/mutator/process_root_includes.go b/bundle/config/loader/process_root_includes.go similarity index 62% rename from bundle/config/mutator/process_root_includes.go rename to bundle/config/loader/process_root_includes.go index 5a5ab1b19..25f284fd3 100644 --- a/bundle/config/mutator/process_root_includes.go +++ b/bundle/config/loader/process_root_includes.go @@ -1,27 +1,16 @@ -package mutator +package loader import ( "context" - "fmt" - "os" "path/filepath" "slices" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/libs/diag" ) -// Get extra include paths from environment variable -func getExtraIncludePaths(ctx context.Context) []string { - value, exists := env.Includes(ctx) - if !exists { - return nil - } - return strings.Split(value, string(os.PathListSeparator)) -} - type processRootIncludes struct{} // ProcessRootIncludes expands the patterns in the configuration's include list @@ -34,7 +23,7 @@ func (m *processRootIncludes) Name() string { return "ProcessRootIncludes" } -func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { var out []bundle.Mutator // Map with files we've already seen to avoid loading them twice. @@ -48,45 +37,33 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error // This is stored in the bundle configuration for observability. 
var files []string - // Converts extra include paths from environment variable to relative paths - for _, extraIncludePath := range getExtraIncludePaths(ctx) { - if filepath.IsAbs(extraIncludePath) { - rel, err := filepath.Rel(b.Config.Path, extraIncludePath) - if err != nil { - return fmt.Errorf("unable to include file '%s': %w", extraIncludePath, err) - } - extraIncludePath = rel - } - b.Config.Include = append(b.Config.Include, extraIncludePath) - } - // For each glob, find all files to load. // Ordering of the list of globs is maintained in the output. // For matches that appear in multiple globs, only the first is kept. for _, entry := range b.Config.Include { // Include paths must be relative. if filepath.IsAbs(entry) { - return fmt.Errorf("%s: includes must be relative paths", entry) + return diag.Errorf("%s: includes must be relative paths", entry) } // Anchor includes to the bundle root path. - matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry)) + matches, err := filepath.Glob(filepath.Join(b.RootPath, entry)) if err != nil { - return err + return diag.FromErr(err) } // If the entry is not a glob pattern and no matches found, // return an error because the file defined is not found if len(matches) == 0 && !strings.ContainsAny(entry, "*?[") { - return fmt.Errorf("%s defined in 'include' section does not match any files", entry) + return diag.Errorf("%s defined in 'include' section does not match any files", entry) } // Filter matches to ones we haven't seen yet. var includes []string for _, match := range matches { - rel, err := filepath.Rel(b.Config.Path, match) + rel, err := filepath.Rel(b.RootPath, match) if err != nil { - return err + return diag.FromErr(err) } if _, ok := seen[rel]; ok { continue @@ -99,7 +76,7 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error slices.Sort(includes) files = append(files, includes...) 
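 // Each include that survives de-duplication gets its own ProcessInclude
 // mutator below, which loads the file and merges it into the root config.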
for _, include := range includes { - out = append(out, ProcessInclude(filepath.Join(b.Config.Path, include), include)) + out = append(out, ProcessInclude(filepath.Join(b.RootPath, include), include)) } } diff --git a/bundle/config/loader/process_root_includes_test.go b/bundle/config/loader/process_root_includes_test.go new file mode 100644 index 000000000..737dbbefd --- /dev/null +++ b/bundle/config/loader/process_root_includes_test.go @@ -0,0 +1,113 @@ +package loader_test + +import ( + "context" + "runtime" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/loader" + "github.com/databricks/cli/internal/testutil" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProcessRootIncludesEmpty(t *testing.T) { + b := &bundle.Bundle{ + RootPath: ".", + } + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.NoError(t, diags.Error()) +} + +func TestProcessRootIncludesAbs(t *testing.T) { + // remove this once equivalent tests for windows have been set up + // or this test has been fixed for windows + // date: 28 Nov 2022 + if runtime.GOOS == "windows" { + t.Skip("skipping temperorilty to make windows unit tests green") + } + + b := &bundle.Bundle{ + RootPath: ".", + Config: config.Root{ + Include: []string{ + "/tmp/*.yml", + }, + }, + } + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.True(t, diags.HasError()) + assert.ErrorContains(t, diags.Error(), "must be relative paths") +} + +func TestProcessRootIncludesSingleGlob(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Include: []string{ + "*.yml", + }, + }, + } + + testutil.Touch(t, b.RootPath, "databricks.yml") + testutil.Touch(t, b.RootPath, "a.yml") + testutil.Touch(t, b.RootPath, "b.yml") + + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.NoError(t, diags.Error()) + assert.Equal(t, []string{"a.yml", "b.yml"}, b.Config.Include) +} + +func TestProcessRootIncludesMultiGlob(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Include: []string{ + "a*.yml", + "b*.yml", + }, + }, + } + + testutil.Touch(t, b.RootPath, "a1.yml") + testutil.Touch(t, b.RootPath, "b1.yml") + + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.NoError(t, diags.Error()) + assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include) +} + +func TestProcessRootIncludesRemoveDups(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Include: []string{ + "*.yml", + "*.yml", + }, + }, + } + + testutil.Touch(t, b.RootPath, "a.yml") + + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.NoError(t, diags.Error()) + assert.Equal(t, []string{"a.yml"}, b.Config.Include) +} + +func TestProcessRootIncludesNotExists(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Include: []string{ + "notexist.yml", + }, + }, + } + diags := bundle.Apply(context.Background(), b, loader.ProcessRootIncludes()) + require.True(t, diags.HasError()) + assert.ErrorContains(t, diags.Error(), "notexist.yml defined in 'include' section does not match any files") +} diff --git a/bundle/config/loader/testdata/databricks.yml b/bundle/config/loader/testdata/databricks.yml new file mode 100644 index 000000000..1a0635b89 --- /dev/null +++ 
b/bundle/config/loader/testdata/databricks.yml @@ -0,0 +1,2 @@ +bundle: + name: loader_test diff --git a/bundle/config/loader/testdata/host.yml b/bundle/config/loader/testdata/host.yml new file mode 100644 index 000000000..f83830d1d --- /dev/null +++ b/bundle/config/loader/testdata/host.yml @@ -0,0 +1,2 @@ +workspace: + host: bar diff --git a/bundle/config/lock.go b/bundle/config/lock.go index 760099a95..10e9e1c9c 100644 --- a/bundle/config/lock.go +++ b/bundle/config/lock.go @@ -1,7 +1,7 @@ package config type Lock struct { - // Enabled toggles deployment lock. True by default. + // Enabled toggles deployment lock. True by default except in development mode. // Use a pointer value so that only explicitly configured values are set // and we don't merge configuration with zero-initialized values. Enabled *bool `json:"enabled,omitempty"` @@ -11,9 +11,20 @@ type Lock struct { Force bool `json:"force,omitempty"` } +// IsEnabled checks if the deployment lock is enabled. func (lock Lock) IsEnabled() bool { if lock.Enabled != nil { return *lock.Enabled } return true } + +// IsExplicitlyEnabled checks if the deployment lock is explicitly enabled. +// Only returns true if locking is explicitly set using a command-line +// flag or configuration file. +func (lock Lock) IsExplicitlyEnabled() bool { + if lock.Enabled != nil { + return *lock.Enabled + } + return false +} diff --git a/bundle/config/mutator/configure_wsfs.go b/bundle/config/mutator/configure_wsfs.go new file mode 100644 index 000000000..17af4828f --- /dev/null +++ b/bundle/config/mutator/configure_wsfs.go @@ -0,0 +1,50 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/vfs" +) + +const envDatabricksRuntimeVersion = "DATABRICKS_RUNTIME_VERSION" + +type configureWSFS struct{} + +func ConfigureWSFS() bundle.Mutator { + return &configureWSFS{} +} + +func (m *configureWSFS) Name() string { + return "ConfigureWSFS" +} + +func (m *configureWSFS) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + root := b.BundleRoot.Native() + + // The bundle root must be located in /Workspace/ + if !strings.HasPrefix(root, "/Workspace/") { + return nil + } + + // The executable must be running on DBR. + if _, ok := env.Lookup(ctx, envDatabricksRuntimeVersion); !ok { + return nil + } + + // If so, swap out vfs.Path instance of the sync root with one that + // makes all Workspace File System interactions extension aware. + p, err := vfs.NewFilerPath(ctx, root, func(path string) (filer.Filer, error) { + return filer.NewWorkspaceFilesExtensionsClient(b.WorkspaceClient(), path) + }) + if err != nil { + return diag.FromErr(err) + } + + b.BundleRoot = p + return nil +} diff --git a/bundle/config/mutator/default_queueing.go b/bundle/config/mutator/default_queueing.go new file mode 100644 index 000000000..ead77c7a8 --- /dev/null +++ b/bundle/config/mutator/default_queueing.go @@ -0,0 +1,38 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +type defaultQueueing struct{} + +func DefaultQueueing() bundle.Mutator { + return &defaultQueueing{} +} + +func (m *defaultQueueing) Name() string { + return "DefaultQueueing" +} + +// Enable queueing for jobs by default, following the behavior from API 2.2+. 
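+// Queueing means that runs beyond a job's concurrency limit wait in a queue
+// instead of being skipped.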
+// As of 2024-04, we're still using API 2.1 which has queueing disabled by default. +// This mutator makes sure queueing is enabled by default before we can adopt API 2.2. +func (m *defaultQueueing) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + r := b.Config.Resources + for i := range r.Jobs { + if r.Jobs[i].JobSettings == nil { + r.Jobs[i].JobSettings = &jobs.JobSettings{} + } + if r.Jobs[i].Queue != nil { + continue + } + r.Jobs[i].Queue = &jobs.QueueSettings{ + Enabled: true, + } + } + return nil +} diff --git a/bundle/config/mutator/default_queueing_test.go b/bundle/config/mutator/default_queueing_test.go new file mode 100644 index 000000000..d3621663b --- /dev/null +++ b/bundle/config/mutator/default_queueing_test.go @@ -0,0 +1,103 @@ +package mutator + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestDefaultQueueing(t *testing.T) { + m := DefaultQueueing() + assert.IsType(t, &defaultQueueing{}, m) +} + +func TestDefaultQueueingName(t *testing.T) { + m := DefaultQueueing() + assert.Equal(t, "DefaultQueueing", m.Name()) +} + +func TestDefaultQueueingApplyNoJobs(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{}, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.Len(t, b.Config.Resources.Jobs, 0) +} + +func TestDefaultQueueingApplyJobsAlreadyEnabled(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: true}, + }, + }, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) +} + +func TestDefaultQueueingApplyEnableQueueing(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Name: "job", + }, + }, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.NotNil(t, b.Config.Resources.Jobs["job"].Queue) + assert.True(t, b.Config.Resources.Jobs["job"].Queue.Enabled) +} + +func TestDefaultQueueingApplyWithMultipleJobs(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: false}, + }, + }, + "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job", + }, + }, + "job3": { + JobSettings: &jobs.JobSettings{ + Queue: &jobs.QueueSettings{Enabled: true}, + }, + }, + }, + }, + }, + } + d := bundle.Apply(context.Background(), b, DefaultQueueing()) + assert.Len(t, d, 0) + assert.False(t, b.Config.Resources.Jobs["job1"].Queue.Enabled) + assert.True(t, b.Config.Resources.Jobs["job2"].Queue.Enabled) + assert.True(t, b.Config.Resources.Jobs["job3"].Queue.Enabled) +} diff --git a/bundle/config/mutator/default_target.go b/bundle/config/mutator/default_target.go index d5318a3e2..73d99002a 100644 --- a/bundle/config/mutator/default_target.go +++ b/bundle/config/mutator/default_target.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" 
"github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" ) type defineDefaultTarget struct { @@ -24,7 +25,7 @@ func (m *defineDefaultTarget) Name() string { return fmt.Sprintf("DefineDefaultTarget(%s)", m.name) } -func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { // Nothing to do if the configuration has at least 1 target. if len(b.Config.Targets) > 0 { return nil diff --git a/bundle/config/mutator/default_target_test.go b/bundle/config/mutator/default_target_test.go index 61a5a0138..d60b14aad 100644 --- a/bundle/config/mutator/default_target_test.go +++ b/bundle/config/mutator/default_target_test.go @@ -13,8 +13,9 @@ import ( func TestDefaultTarget(t *testing.T) { b := &bundle.Bundle{} - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) + require.NoError(t, diags.Error()) + env, ok := b.Config.Targets["default"] assert.True(t, ok) assert.Equal(t, &config.Target{}, env) @@ -28,8 +29,9 @@ func TestDefaultTargetAlreadySpecified(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultTarget()) + require.NoError(t, diags.Error()) + _, ok := b.Config.Targets["default"] assert.False(t, ok) } diff --git a/bundle/config/mutator/default_workspace_paths.go b/bundle/config/mutator/default_workspace_paths.go index 04f2b0dc0..71e562b51 100644 --- a/bundle/config/mutator/default_workspace_paths.go +++ b/bundle/config/mutator/default_workspace_paths.go @@ -2,10 +2,10 @@ package mutator import ( "context" - "fmt" "path" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type defineDefaultWorkspacePaths struct{} @@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string { return "DefaultWorkspacePaths" } -func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { root := b.Config.Workspace.RootPath if root == "" { - return fmt.Errorf("unable to define default workspace paths: workspace root not defined") + return diag.Errorf("unable to define default workspace paths: workspace root not defined") } if b.Config.Workspace.FilePath == "" { diff --git a/bundle/config/mutator/default_workspace_paths_test.go b/bundle/config/mutator/default_workspace_paths_test.go index 1ad0ca786..0ba20ea2b 100644 --- a/bundle/config/mutator/default_workspace_paths_test.go +++ b/bundle/config/mutator/default_workspace_paths_test.go @@ -19,8 +19,8 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) + require.NoError(t, diags.Error()) assert.Equal(t, "/files", b.Config.Workspace.FilePath) assert.Equal(t, "/artifacts", b.Config.Workspace.ArtifactPath) assert.Equal(t, "/state", b.Config.Workspace.StatePath) @@ -37,8 +37,8 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) - require.NoError(t, err) + diags := 
bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspacePaths()) + require.NoError(t, diags.Error()) assert.Equal(t, "/foo/bar", b.Config.Workspace.FilePath) assert.Equal(t, "/foo/bar", b.Config.Workspace.ArtifactPath) assert.Equal(t, "/foo/bar", b.Config.Workspace.StatePath) diff --git a/bundle/config/mutator/default_workspace_root.go b/bundle/config/mutator/default_workspace_root.go index 260a59584..d7c24a5b5 100644 --- a/bundle/config/mutator/default_workspace_root.go +++ b/bundle/config/mutator/default_workspace_root.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type defineDefaultWorkspaceRoot struct{} @@ -18,17 +19,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string { return "DefineDefaultWorkspaceRoot" } -func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Workspace.RootPath != "" { return nil } if b.Config.Bundle.Name == "" { - return fmt.Errorf("unable to define default workspace root: bundle name not defined") + return diag.Errorf("unable to define default workspace root: bundle name not defined") } if b.Config.Bundle.Target == "" { - return fmt.Errorf("unable to define default workspace root: bundle target not selected") + return diag.Errorf("unable to define default workspace root: bundle target not selected") } b.Config.Workspace.RootPath = fmt.Sprintf( diff --git a/bundle/config/mutator/default_workspace_root_test.go b/bundle/config/mutator/default_workspace_root_test.go index 9dd549a39..b05520f62 100644 --- a/bundle/config/mutator/default_workspace_root_test.go +++ b/bundle/config/mutator/default_workspace_root_test.go @@ -20,7 +20,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.DefineDefaultWorkspaceRoot()) + require.NoError(t, diags.Error()) + assert.Equal(t, "~/.bundle/name/environment", b.Config.Workspace.RootPath) } diff --git a/bundle/config/mutator/environments_compat.go b/bundle/config/mutator/environments_compat.go new file mode 100644 index 000000000..fb898edea --- /dev/null +++ b/bundle/config/mutator/environments_compat.go @@ -0,0 +1,66 @@ +package mutator + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type environmentsToTargets struct{} + +func EnvironmentsToTargets() bundle.Mutator { + return &environmentsToTargets{} +} + +func (m *environmentsToTargets) Name() string { + return "EnvironmentsToTargets" +} + +func (m *environmentsToTargets) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // Short circuit if the "environments" key is not set. + // This is the common case. + if b.Config.Environments == nil { + return nil + } + + // The "environments" key is set; validate and rewrite it to "targets". + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + environments := v.Get("environments") + targets := v.Get("targets") + + // Return an error if both "environments" and "targets" are set. 
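+	// For example, a configuration that declares the same target under both keys:
+	//
+	//   environments:
+	//     dev:
+	//       mode: development
+	//   targets:
+	//     dev:
+	//       mode: development
+	//
+	// is rejected outright instead of guessing which definition wins.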
+ if environments.Kind() != dyn.KindInvalid && targets.Kind() != dyn.KindInvalid { + return dyn.InvalidValue, fmt.Errorf( + "both 'environments' and 'targets' are specified; only 'targets' should be used: %s", + environments.Location().String(), + ) + } + + // Rewrite "environments" to "targets". + if environments.Kind() != dyn.KindInvalid && targets.Kind() == dyn.KindInvalid { + nv, err := dyn.Set(v, "targets", environments) + if err != nil { + return dyn.InvalidValue, err + } + // Drop the "environments" key. + return dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + switch len(p) { + case 0: + return v, nil + case 1: + if p[0] == dyn.Key("environments") { + return v, dyn.ErrDrop + } + } + return v, dyn.ErrSkip + }) + } + + return v, nil + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/environments_compat_test.go b/bundle/config/mutator/environments_compat_test.go new file mode 100644 index 000000000..8a2129847 --- /dev/null +++ b/bundle/config/mutator/environments_compat_test.go @@ -0,0 +1,66 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestEnvironmentsToTargetsWithBothDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Environments: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + Targets: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + assert.ErrorContains(t, diags.Error(), `both 'environments' and 'targets' are specified;`) +} + +func TestEnvironmentsToTargetsWithEnvironmentsDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Environments: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + require.NoError(t, diags.Error()) + assert.Len(t, b.Config.Environments, 0) + assert.Len(t, b.Config.Targets, 1) +} + +func TestEnvironmentsToTargetsWithTargetsDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Targets: map[string]*config.Target{ + "name": { + Mode: config.Development, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.EnvironmentsToTargets()) + require.NoError(t, diags.Error()) + assert.Len(t, b.Config.Environments, 0) + assert.Len(t, b.Config.Targets, 1) +} diff --git a/bundle/config/mutator/expand_pipeline_glob_paths.go b/bundle/config/mutator/expand_pipeline_glob_paths.go index cb1477784..5703332fa 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths.go @@ -7,7 +7,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" ) type expandPipelineGlobPaths struct{} @@ -16,77 +17,96 @@ func ExpandPipelineGlobPaths() bundle.Mutator { return &expandPipelineGlobPaths{} } -func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) error { - for key, pipeline := range b.Config.Resources.Pipelines { - dir, err := pipeline.ConfigFileDirectory() +func (m *expandPipelineGlobPaths) expandLibrary(v 
dyn.Value) ([]dyn.Value, error) { + // Probe for the path field in the library. + for _, p := range []dyn.Path{ + dyn.NewPath(dyn.Key("notebook"), dyn.Key("path")), + dyn.NewPath(dyn.Key("file"), dyn.Key("path")), + } { + pv, err := dyn.GetByPath(v, p) + if dyn.IsNoSuchKeyError(err) { + continue + } if err != nil { - return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) + return nil, err } - expandedLibraries := make([]pipelines.PipelineLibrary, 0) - for i := 0; i < len(pipeline.Libraries); i++ { + // If the path is empty or not a local path, return the original value. + path := pv.MustString() + if path == "" || !libraries.IsLocalPath(path) { + return []dyn.Value{v}, nil + } - library := &pipeline.Libraries[i] - path := getGlobPatternToExpand(library) - if path == "" || !libraries.IsLocalPath(path) { - expandedLibraries = append(expandedLibraries, *library) - continue - } + dir, err := v.Location().Directory() + if err != nil { + return nil, err + } - matches, err := filepath.Glob(filepath.Join(dir, path)) + matches, err := filepath.Glob(filepath.Join(dir, path)) + if err != nil { + return nil, err + } + + // If there are no matches, return the original value. + if len(matches) == 0 { + return []dyn.Value{v}, nil + } + + // Emit a new value for each match. + var ev []dyn.Value + for _, match := range matches { + m, err := filepath.Rel(dir, match) if err != nil { - return err + return nil, err } - - if len(matches) == 0 { - expandedLibraries = append(expandedLibraries, *library) - continue - } - - for _, match := range matches { - m, err := filepath.Rel(dir, match) - if err != nil { - return err - } - expandedLibraries = append(expandedLibraries, cloneWithPath(library, m)) + nv, err := dyn.SetByPath(v, p, dyn.NewValue(m, pv.Locations())) + if err != nil { + return nil, err } + ev = append(ev, nv) } - pipeline.Libraries = expandedLibraries + + return ev, nil } - return nil + // Neither of the library paths were found. This is likely an invalid node, + // but it isn't this mutator's job to enforce that. Return the original value. + return []dyn.Value{v}, nil } -func getGlobPatternToExpand(library *pipelines.PipelineLibrary) string { - if library.File != nil { - return library.File.Path +func (m *expandPipelineGlobPaths) expandSequence(p dyn.Path, v dyn.Value) (dyn.Value, error) { + s, ok := v.AsSequence() + if !ok { + return dyn.InvalidValue, fmt.Errorf("expected sequence, got %s", v.Kind()) } - if library.Notebook != nil { - return library.Notebook.Path + var vs []dyn.Value + for _, sv := range s { + v, err := m.expandLibrary(sv) + if err != nil { + return dyn.InvalidValue, err + } + + vs = append(vs, v...) } - return "" + return dyn.NewValue(vs, v.Locations()), nil } -func cloneWithPath(library *pipelines.PipelineLibrary, path string) pipelines.PipelineLibrary { - if library.File != nil { - return pipelines.PipelineLibrary{ - File: &pipelines.FileLibrary{ - Path: path, - }, - } - } +func (m *expandPipelineGlobPaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + p := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("pipelines"), + dyn.AnyKey(), + dyn.Key("libraries"), + ) - if library.Notebook != nil { - return pipelines.PipelineLibrary{ - Notebook: &pipelines.NotebookLibrary{ - Path: path, - }, - } - } + // Visit each pipeline's "libraries" field and expand any glob patterns. 
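+		// The pattern selects "resources.pipelines.<name>.libraries" for every
+		// pipeline. expandSequence then replaces each library whose notebook or
+		// file path is a local glob with one entry per match, and leaves remote
+		// paths and non-matching entries untouched.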
+ return dyn.MapByPattern(v, p, m.expandSequence) + }) - return pipelines.PipelineLibrary{} + return diag.FromErr(err) } func (*expandPipelineGlobPaths) Name() string { diff --git a/bundle/config/mutator/expand_pipeline_glob_paths_test.go b/bundle/config/mutator/expand_pipeline_glob_paths_test.go index ad86865af..d1671c256 100644 --- a/bundle/config/mutator/expand_pipeline_glob_paths_test.go +++ b/bundle/config/mutator/expand_pipeline_glob_paths_test.go @@ -8,8 +8,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/stretchr/testify/require" @@ -35,16 +35,17 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "test1.py")) touchEmptyFile(t, filepath.Join(dir, "test/test2.py")) touchEmptyFile(t, filepath.Join(dir, "test/test3.py")) + touchEmptyFile(t, filepath.Join(dir, "relative/test4.py")) + touchEmptyFile(t, filepath.Join(dir, "relative/test5.py")) + touchEmptyFile(t, filepath.Join(dir, "skip/test6.py")) + touchEmptyFile(t, filepath.Join(dir, "skip/test7.py")) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -57,7 +58,13 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { }, { File: &pipelines.FileLibrary{ - Path: "./**/*.py", + Path: "./test/*.py", + }, + }, + { + // This value is annotated to be defined in the "./relative" directory. 
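+						// (the annotation comes from the bundletest.SetLocation call
+						// further down, which assigns this entry a location under
+						// "./relative", so the glob is expanded against that directory)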
+ File: &pipelines.FileLibrary{ + Path: "./*.py", }, }, { @@ -98,12 +105,15 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { }, } + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + bundletest.SetLocation(b, "resources.pipelines.pipeline.libraries[3]", filepath.Join(dir, "relative", "resource.yml")) + m := ExpandPipelineGlobPaths() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) libraries := b.Config.Resources.Pipelines["pipeline"].Libraries - require.Len(t, libraries, 11) + require.Len(t, libraries, 13) // Making sure glob patterns are expanded correctly require.True(t, containsNotebook(libraries, filepath.Join("test", "test2.ipynb"))) @@ -111,6 +121,10 @@ func TestExpandGlobPathsInPipelines(t *testing.T) { require.True(t, containsFile(libraries, filepath.Join("test", "test2.py"))) require.True(t, containsFile(libraries, filepath.Join("test", "test3.py"))) + // These patterns are defined relative to "./relative" + require.True(t, containsFile(libraries, "test4.py")) + require.True(t, containsFile(libraries, "test5.py")) + // Making sure exact file references work as well require.True(t, containsNotebook(libraries, "test1.ipynb")) diff --git a/bundle/config/mutator/expand_workspace_root.go b/bundle/config/mutator/expand_workspace_root.go index 59f19ccc4..8954abd46 100644 --- a/bundle/config/mutator/expand_workspace_root.go +++ b/bundle/config/mutator/expand_workspace_root.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type expandWorkspaceRoot struct{} @@ -20,15 +21,15 @@ func (m *expandWorkspaceRoot) Name() string { return "ExpandWorkspaceRoot" } -func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { root := b.Config.Workspace.RootPath if root == "" { - return fmt.Errorf("unable to expand workspace root: workspace root not defined") + return diag.Errorf("unable to expand workspace root: workspace root not defined") } currentUser := b.Config.Workspace.CurrentUser if currentUser == nil || currentUser.UserName == "" { - return fmt.Errorf("unable to expand workspace root: current user not set") + return diag.Errorf("unable to expand workspace root: current user not set") } if strings.HasPrefix(root, "~/") { diff --git a/bundle/config/mutator/expand_workspace_root_test.go b/bundle/config/mutator/expand_workspace_root_test.go index 17ee06509..e6260dbd8 100644 --- a/bundle/config/mutator/expand_workspace_root_test.go +++ b/bundle/config/mutator/expand_workspace_root_test.go @@ -25,8 +25,8 @@ func TestExpandWorkspaceRoot(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.NoError(t, diags.Error()) assert.Equal(t, "/Users/jane@doe.com/foo", b.Config.Workspace.RootPath) } @@ -43,8 +43,8 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.NoError(t, diags.Error()) assert.Equal(t, "/Users/charly@doe.com/foo", b.Config.Workspace.RootPath) } @@ -60,8 +60,8 @@ func TestExpandWorkspaceRootWithoutRoot(t 
*testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.True(t, diags.HasError()) } func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { @@ -72,6 +72,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, mutator.ExpandWorkspaceRoot()) + require.True(t, diags.HasError()) } diff --git a/bundle/config/mutator/if.go b/bundle/config/mutator/if.go deleted file mode 100644 index 462d8f004..000000000 --- a/bundle/config/mutator/if.go +++ /dev/null @@ -1,35 +0,0 @@ -package mutator - -import ( - "context" - - "github.com/databricks/cli/bundle" -) - -type ifMutator struct { - condition func(*bundle.Bundle) bool - onTrueMutator bundle.Mutator - onFalseMutator bundle.Mutator -} - -func If( - condition func(*bundle.Bundle) bool, - onTrueMutator bundle.Mutator, - onFalseMutator bundle.Mutator, -) bundle.Mutator { - return &ifMutator{ - condition, onTrueMutator, onFalseMutator, - } -} - -func (m *ifMutator) Apply(ctx context.Context, b *bundle.Bundle) error { - if m.condition(b) { - return bundle.Apply(ctx, b, m.onTrueMutator) - } else { - return bundle.Apply(ctx, b, m.onFalseMutator) - } -} - -func (m *ifMutator) Name() string { - return "If" -} diff --git a/bundle/config/mutator/initialize_variables.go b/bundle/config/mutator/initialize_variables.go index 8e50b4d04..e72cdde31 100644 --- a/bundle/config/mutator/initialize_variables.go +++ b/bundle/config/mutator/initialize_variables.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" ) type initializeVariables struct{} @@ -18,7 +19,7 @@ func (m *initializeVariables) Name() string { return "InitializeVariables" } -func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *initializeVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { vars := b.Config.Variables for k, v := range vars { if v == nil { diff --git a/bundle/config/mutator/initialize_variables_test.go b/bundle/config/mutator/initialize_variables_test.go index 46445591a..3ca4384fa 100644 --- a/bundle/config/mutator/initialize_variables_test.go +++ b/bundle/config/mutator/initialize_variables_test.go @@ -23,8 +23,8 @@ func TestInitializeVariables(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, diags.Error()) assert.NotNil(t, b.Config.Variables["foo"]) assert.NotNil(t, b.Config.Variables["bar"]) assert.Equal(t, "This is a description", b.Config.Variables["bar"].Description) @@ -36,7 +36,7 @@ func TestInitializeVariablesWithoutVariables(t *testing.T) { Variables: nil, }, } - err := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.InitializeVariables()) + require.NoError(t, diags.Error()) assert.Nil(t, b.Config.Variables) } diff --git a/bundle/config/mutator/initialize_workspace_client.go b/bundle/config/mutator/initialize_workspace_client.go index afc38d4d5..5c905f40c 100644 --- 
a/bundle/config/mutator/initialize_workspace_client.go +++ b/bundle/config/mutator/initialize_workspace_client.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type initializeWorkspaceClient struct{} @@ -19,7 +20,7 @@ func (m *initializeWorkspaceClient) Name() string { // Apply initializes the workspace client for the bundle. We do this here so // downstream calls to b.WorkspaceClient() do not panic if there's an error in the // auth configuration. -func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *initializeWorkspaceClient) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { _, err := b.InitializeWorkspaceClient() - return err + return diag.FromErr(err) } diff --git a/bundle/config/mutator/load_git_details.go b/bundle/config/mutator/load_git_details.go index 3a50d683e..9b1c963c9 100644 --- a/bundle/config/mutator/load_git_details.go +++ b/bundle/config/mutator/load_git_details.go @@ -5,6 +5,7 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" ) @@ -19,11 +20,11 @@ func (m *loadGitDetails) Name() string { return "LoadGitDetails" } -func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Load relevant git repository - repo, err := git.NewRepository(b.Config.Path) + repo, err := git.NewRepository(b.BundleRoot) if err != nil { - return err + return diag.FromErr(err) } // Read branch name of current checkout @@ -55,14 +56,14 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { } // Compute relative path of the bundle root from the Git repo root. 
- absBundlePath, err := filepath.Abs(b.Config.Path) + absBundlePath, err := filepath.Abs(b.RootPath) if err != nil { - return err + return diag.FromErr(err) } // repo.Root() returns the absolute path of the repo relBundlePath, err := filepath.Rel(repo.Root(), absBundlePath) if err != nil { - return err + return diag.FromErr(err) } b.Config.Bundle.Git.BundleRootPath = filepath.ToSlash(relBundlePath) return nil diff --git a/bundle/config/mutator/merge_job_clusters.go b/bundle/config/mutator/merge_job_clusters.go new file mode 100644 index 000000000..aa131f287 --- /dev/null +++ b/bundle/config/mutator/merge_job_clusters.go @@ -0,0 +1,45 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergeJobClusters struct{} + +func MergeJobClusters() bundle.Mutator { + return &mergeJobClusters{} +} + +func (m *mergeJobClusters) Name() string { + return "MergeJobClusters" +} + +func (m *mergeJobClusters) jobClusterKey(v dyn.Value) string { + switch v.Kind() { + case dyn.KindInvalid, dyn.KindNil: + return "" + case dyn.KindString: + return v.MustString() + default: + panic("job cluster key must be a string") + } +} + +func (m *mergeJobClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v.Kind() == dyn.KindNil { + return v, nil + } + + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) { + return dyn.Map(job, "job_clusters", merge.ElementsByKey("job_cluster_key", m.jobClusterKey)) + })) + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/merge_job_clusters_test.go b/bundle/config/mutator/merge_job_clusters_test.go new file mode 100644 index 000000000..c9052c1f7 --- /dev/null +++ b/bundle/config/mutator/merge_job_clusters_test.go @@ -0,0 +1,105 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestMergeJobClusters(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "foo", + NewCluster: compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + JobClusterKey: "bar", + NewCluster: compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + JobClusterKey: "foo", + NewCluster: compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) + assert.NoError(t, diags.Error()) + + j := b.Config.Resources.Jobs["foo"] + + assert.Len(t, j.JobClusters, 2) + assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey) + assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey) + + // This job cluster was merged with a subsequent one. 
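// Fields set on the later duplicate (node type, worker count) take precedence, while fields only set on the earlier entry (spark version) are preserved.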
+ jc0 := j.JobClusters[0].NewCluster + assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion) + assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId) + assert.Equal(t, 4, jc0.NumWorkers) + + // This job cluster was left untouched. + jc1 := j.JobClusters[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) +} + +func TestMergeJobClustersWithNilKey(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergeJobClusters()) + assert.NoError(t, diags.Error()) + assert.Len(t, b.Config.Resources.Jobs["foo"].JobClusters, 1) +} diff --git a/bundle/config/mutator/merge_job_tasks.go b/bundle/config/mutator/merge_job_tasks.go new file mode 100644 index 000000000..9498e8822 --- /dev/null +++ b/bundle/config/mutator/merge_job_tasks.go @@ -0,0 +1,45 @@ +package mutator + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergeJobTasks struct{} + +func MergeJobTasks() bundle.Mutator { + return &mergeJobTasks{} +} + +func (m *mergeJobTasks) Name() string { + return "MergeJobTasks" +} + +func (m *mergeJobTasks) taskKeyString(v dyn.Value) string { + switch v.Kind() { + case dyn.KindInvalid, dyn.KindNil: + return "" + case dyn.KindString: + return v.MustString() + default: + panic("task key must be a string") + } +} + +func (m *mergeJobTasks) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v.Kind() == dyn.KindNil { + return v, nil + } + + return dyn.Map(v, "resources.jobs", dyn.Foreach(func(_ dyn.Path, job dyn.Value) (dyn.Value, error) { + return dyn.Map(job, "tasks", merge.ElementsByKey("task_key", m.taskKeyString)) + })) + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/merge_job_tasks_test.go b/bundle/config/mutator/merge_job_tasks_test.go new file mode 100644 index 000000000..a9dae1e10 --- /dev/null +++ b/bundle/config/mutator/merge_job_tasks_test.go @@ -0,0 +1,117 @@ +package mutator_test + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestMergeJobTasks(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + Libraries: []compute.Library{ + {Whl: "package1"}, + }, + }, + { + TaskKey: "bar", + NewCluster: &compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + { + TaskKey: "foo", + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + 
}, + Libraries: []compute.Library{ + {Pypi: &compute.PythonPyPiLibrary{ + Package: "package2", + }}, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, diags.Error()) + + j := b.Config.Resources.Jobs["foo"] + + assert.Len(t, j.Tasks, 2) + assert.Equal(t, "foo", j.Tasks[0].TaskKey) + assert.Equal(t, "bar", j.Tasks[1].TaskKey) + + // This task was merged with a subsequent one. + task0 := j.Tasks[0] + cluster := task0.NewCluster + assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) + assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) + assert.Equal(t, 4, cluster.NumWorkers) + assert.Len(t, task0.Libraries, 2) + assert.Equal(t, task0.Libraries[0].Whl, "package1") + assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") + + // This task was left untouched. + task1 := j.Tasks[1].NewCluster + assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion) +} + +func TestMergeJobTasksWithNilKey(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "foo": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + SparkVersion: "13.3.x-scala2.12", + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + }, + }, + { + NewCluster: &compute.ClusterSpec{ + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergeJobTasks()) + assert.NoError(t, diags.Error()) + assert.Len(t, b.Config.Resources.Jobs["foo"].Tasks, 1) +} diff --git a/bundle/config/mutator/merge_pipeline_clusters.go b/bundle/config/mutator/merge_pipeline_clusters.go new file mode 100644 index 000000000..52f3e6fa6 --- /dev/null +++ b/bundle/config/mutator/merge_pipeline_clusters.go @@ -0,0 +1,48 @@ +package mutator + +import ( + "context" + "strings" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +type mergePipelineClusters struct{} + +func MergePipelineClusters() bundle.Mutator { + return &mergePipelineClusters{} +} + +func (m *mergePipelineClusters) Name() string { + return "MergePipelineClusters" +} + +func (m *mergePipelineClusters) clusterLabel(v dyn.Value) string { + switch v.Kind() { + case dyn.KindInvalid, dyn.KindNil: + // Note: the cluster label is optional and defaults to 'default'. + // We therefore ALSO merge all clusters without a label. 
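// (Consequently, a cluster without a label and a cluster explicitly labeled "default" resolve to the same key and are merged together.)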
+ return "default" + case dyn.KindString: + return strings.ToLower(v.MustString()) + default: + panic("task key must be a string") + } +} + +func (m *mergePipelineClusters) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + if v.Kind() == dyn.KindNil { + return v, nil + } + + return dyn.Map(v, "resources.pipelines", dyn.Foreach(func(_ dyn.Path, pipeline dyn.Value) (dyn.Value, error) { + return dyn.Map(pipeline, "clusters", merge.ElementsByKey("label", m.clusterLabel)) + })) + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/merge_pipeline_clusters_test.go b/bundle/config/mutator/merge_pipeline_clusters_test.go new file mode 100644 index 000000000..f117d9399 --- /dev/null +++ b/bundle/config/mutator/merge_pipeline_clusters_test.go @@ -0,0 +1,125 @@ +package mutator_test + +import ( + "context" + "strings" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" +) + +func TestMergePipelineClusters(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + NodeTypeId: "i3.xlarge", + NumWorkers: 2, + PolicyId: "1234", + }, + { + Label: "maintenance", + NodeTypeId: "i3.2xlarge", + }, + { + NodeTypeId: "i3.2xlarge", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) + + p := b.Config.Resources.Pipelines["foo"] + + assert.Len(t, p.Clusters, 2) + assert.Equal(t, "default", p.Clusters[0].Label) + assert.Equal(t, "maintenance", p.Clusters[1].Label) + + // The default cluster was merged with a subsequent one. + pc0 := p.Clusters[0] + assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId) + assert.Equal(t, 4, pc0.NumWorkers) + assert.Equal(t, "1234", pc0.PolicyId) + + // The maintenance cluster was left untouched. + pc1 := p.Clusters[1] + assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId) +} + +func TestMergePipelineClustersCaseInsensitive(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "foo": { + PipelineSpec: &pipelines.PipelineSpec{ + Clusters: []pipelines.PipelineCluster{ + { + Label: "default", + NumWorkers: 2, + }, + { + Label: "DEFAULT", + NumWorkers: 4, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) + + p := b.Config.Resources.Pipelines["foo"] + assert.Len(t, p.Clusters, 1) + + // The default cluster was merged with a subsequent one. 
+ pc0 := p.Clusters[0] + assert.Equal(t, "default", strings.ToLower(pc0.Label)) + assert.Equal(t, 4, pc0.NumWorkers) +} + +func TestMergePipelineClustersNilPipelines(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: nil, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) +} + +func TestMergePipelineClustersEmptyPipelines(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{}, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.MergePipelineClusters()) + assert.NoError(t, diags.Error()) +} diff --git a/bundle/config/mutator/mutator.go b/bundle/config/mutator/mutator.go index b6327e859..52f85eeb8 100644 --- a/bundle/config/mutator/mutator.go +++ b/bundle/config/mutator/mutator.go @@ -3,19 +3,28 @@ package mutator import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/loader" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/scripts" ) func DefaultMutators() []bundle.Mutator { return []bundle.Mutator{ + loader.EntryPoint(), + + // Execute preinit script before processing includes. + // It needs to be done before processing configuration files to allow + // the script to modify the configuration or add own configuration files. scripts.Execute(config.ScriptPreInit), - ProcessRootIncludes(), + loader.ProcessRootIncludes(), + + // Verify that the CLI version is within the specified range. + VerifyCliVersion(), + + EnvironmentsToTargets(), InitializeVariables(), DefineDefaultTarget(), LoadGitDetails(), + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseLoad), } } - -func DefaultMutatorsForTarget(env string) []bundle.Mutator { - return append(DefaultMutators(), SelectTarget(env)) -} diff --git a/bundle/config/mutator/noop.go b/bundle/config/mutator/noop.go index 91c16385b..f27c940e3 100644 --- a/bundle/config/mutator/noop.go +++ b/bundle/config/mutator/noop.go @@ -4,11 +4,12 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type noop struct{} -func (*noop) Apply(context.Context, *bundle.Bundle) error { +func (*noop) Apply(context.Context, *bundle.Bundle) diag.Diagnostics { return nil } diff --git a/bundle/config/mutator/override_compute.go b/bundle/config/mutator/override_compute.go index 21d950135..73fbad364 100644 --- a/bundle/config/mutator/override_compute.go +++ b/bundle/config/mutator/override_compute.go @@ -2,11 +2,11 @@ package mutator import ( "context" - "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" ) @@ -22,20 +22,25 @@ func (m *overrideCompute) Name() string { func overrideJobCompute(j *resources.Job, compute string) { for i := range j.Tasks { - task := &j.Tasks[i] - if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" { + var task = &j.Tasks[i] + + if task.ForEachTask != nil { + task = &task.ForEachTask.Task + } + + if task.NewCluster != nil || task.ExistingClusterId != "" || task.EnvironmentKey != "" || task.JobClusterKey != "" { task.NewCluster = nil task.JobClusterKey = "" - task.ComputeKey = "" + task.EnvironmentKey = "" 
task.ExistingClusterId = compute + } + } +} + -func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Bundle.Mode != config.Development { if b.Config.Bundle.ComputeID != "" { - return fmt.Errorf("cannot override compute for an target that does not use 'mode: development'") + return diag.Errorf("cannot override compute for a target that does not use 'mode: development'") } return nil } } diff --git a/bundle/config/mutator/override_compute_test.go b/bundle/config/mutator/override_compute_test.go index 4c5d4427d..152ee543e 100644 --- a/bundle/config/mutator/override_compute_test.go +++ b/bundle/config/mutator/override_compute_test.go @@ -28,13 +28,15 @@ func TestOverrideDevelopment(t *testing.T) { Name: "job1", Tasks: []jobs.Task{ { - NewCluster: &compute.ClusterSpec{}, + NewCluster: &compute.ClusterSpec{ + SparkVersion: "14.2.x-scala2.12", + }, }, { ExistingClusterId: "cluster2", }, { - ComputeKey: "compute_key", + EnvironmentKey: "environment_key", }, { JobClusterKey: "cluster_key", @@ -47,8 +49,8 @@ func TestOverrideDevelopment(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) @@ -56,7 +58,7 @@ func TestOverrideDevelopment(t *testing.T) { assert.Equal(t, "newClusterID", b.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId) assert.Nil(t, b.Config.Resources.Jobs["job1"].Tasks[0].NewCluster) - assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[2].EnvironmentKey) assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey) } @@ -83,8 +85,8 @@ func TestOverrideDevelopmentEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Equal(t, "cluster2", b.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId) } @@ -108,11 +110,36 @@ func TestOverridePipelineTask(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId) } +func TestOverrideForEachTask(t *testing.T) { + t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId") + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": {JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + { + ForEachTask: &jobs.ForEachTask{}, + }, + }, + }}, + }, + }, + }, + } + + m := mutator.OverrideCompute() + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + assert.Empty(t, b.Config.Resources.Jobs["job1"].Tasks[0].ForEachTask.Task) +} + func TestOverrideProduction(t *testing.T) { b := &bundle.Bundle{ Config: config.Root{ @@ -138,8 +165,8 @@ func TestOverrideProduction(t *testing.T) { } m := mutator.OverrideCompute() - err :=
bundle.Apply(context.Background(), b, m) - require.Error(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.True(t, diags.HasError()) } func TestOverrideProductionEnv(t *testing.T) { @@ -165,6 +192,6 @@ func TestOverrideProductionEnv(t *testing.T) { } m := mutator.OverrideCompute() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) } diff --git a/bundle/config/mutator/populate_current_user.go b/bundle/config/mutator/populate_current_user.go index a604cb902..b5e0bd437 100644 --- a/bundle/config/mutator/populate_current_user.go +++ b/bundle/config/mutator/populate_current_user.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" ) @@ -20,7 +21,7 @@ func (m *populateCurrentUser) Name() string { return "PopulateCurrentUser" } -func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Workspace.CurrentUser != nil { return nil } @@ -28,7 +29,7 @@ func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error w := b.WorkspaceClient() me, err := w.CurrentUser.Me(ctx) if err != nil { - return err + return diag.FromErr(err) } b.Config.Workspace.CurrentUser = &config.User{ diff --git a/bundle/config/mutator/process_root_includes_test.go b/bundle/config/mutator/process_root_includes_test.go deleted file mode 100644 index 88a6c7433..000000000 --- a/bundle/config/mutator/process_root_includes_test.go +++ /dev/null @@ -1,167 +0,0 @@ -package mutator_test - -import ( - "context" - "os" - "path" - "path/filepath" - "runtime" - "strings" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/env" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func touch(t *testing.T, path, file string) { - f, err := os.Create(filepath.Join(path, file)) - require.NoError(t, err) - f.Close() -} - -func TestProcessRootIncludesEmpty(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Path: ".", - }, - } - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) -} - -func TestProcessRootIncludesAbs(t *testing.T) { - // remove this once equivalent tests for windows have been set up - // or this test has been fixed for windows - // date: 28 Nov 2022 - if runtime.GOOS == "windows" { - t.Skip("skipping temperorilty to make windows unit tests green") - } - - b := &bundle.Bundle{ - Config: config.Root{ - Path: ".", - Include: []string{ - "/tmp/*.yml", - }, - }, - } - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.Error(t, err) - assert.Contains(t, err.Error(), "must be relative paths") -} - -func TestProcessRootIncludesSingleGlob(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Path: t.TempDir(), - Include: []string{ - "*.yml", - }, - }, - } - - touch(t, b.Config.Path, "databricks.yml") - touch(t, b.Config.Path, "a.yml") - touch(t, b.Config.Path, "b.yml") - - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) - - assert.Equal(t, []string{"a.yml", "b.yml"}, 
b.Config.Include) -} - -func TestProcessRootIncludesMultiGlob(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Path: t.TempDir(), - Include: []string{ - "a*.yml", - "b*.yml", - }, - }, - } - - touch(t, b.Config.Path, "a1.yml") - touch(t, b.Config.Path, "b1.yml") - - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) - - assert.Equal(t, []string{"a1.yml", "b1.yml"}, b.Config.Include) -} - -func TestProcessRootIncludesRemoveDups(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Path: t.TempDir(), - Include: []string{ - "*.yml", - "*.yml", - }, - }, - } - - touch(t, b.Config.Path, "a.yml") - - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) - assert.Equal(t, []string{"a.yml"}, b.Config.Include) -} - -func TestProcessRootIncludesNotExists(t *testing.T) { - b := &bundle.Bundle{ - Config: config.Root{ - Path: t.TempDir(), - Include: []string{ - "notexist.yml", - }, - }, - } - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.Error(t, err) - assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files") -} - -func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) { - rootPath := t.TempDir() - testYamlName := "extra_include_path.yml" - touch(t, rootPath, testYamlName) - t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName)) - - b := &bundle.Bundle{ - Config: config.Root{ - Path: rootPath, - }, - } - - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) - assert.Contains(t, b.Config.Include, testYamlName) -} - -func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) { - rootPath := t.TempDir() - testYamlName := "extra_include_path.yml" - touch(t, rootPath, testYamlName) - t.Setenv(env.IncludesVariable, strings.Join( - []string{ - path.Join(rootPath, testYamlName), - path.Join(rootPath, testYamlName), - }, - string(os.PathListSeparator), - )) - - b := &bundle.Bundle{ - Config: config.Root{ - Path: rootPath, - }, - } - - err := bundle.Apply(context.Background(), b, mutator.ProcessRootIncludes()) - require.NoError(t, err) - assert.Equal(t, []string{testYamlName}, b.Config.Include) -} diff --git a/bundle/config/mutator/process_target_mode.go b/bundle/config/mutator/process_target_mode.go index 592e3612c..b50716fd6 100644 --- a/bundle/config/mutator/process_target_mode.go +++ b/bundle/config/mutator/process_target_mode.go @@ -2,14 +2,15 @@ package mutator import ( "context" - "fmt" "path" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -29,9 +30,14 @@ func (m *processTargetMode) Name() string { // Mark all resources as being for 'development' purposes, i.e. // changing their name, adding tags, and (in the future) // marking them as 'hidden' in the UI.
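// For example, for the user "lennart", a job named "job1" becomes "[dev lennart] job1" and is tagged with dev=lennart (the tag value is normalized per cloud provider, as exercised in the tests below).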
-func transformDevelopmentMode(b *bundle.Bundle) error { - r := b.Config.Resources +func transformDevelopmentMode(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if !b.Config.Bundle.Deployment.Lock.IsExplicitlyEnabled() { + log.Infof(ctx, "Development mode: disabling deployment lock since bundle.deployment.lock.enabled is not set to true") + disabled := false + b.Config.Bundle.Deployment.Lock.Enabled = &disabled + } + r := b.Config.Resources shortName := b.Config.Workspace.CurrentUser.ShortName prefix := "[dev " + shortName + "] " @@ -70,7 +76,7 @@ func transformDevelopmentMode(b *bundle.Bundle) error { for i := range r.Models { r.Models[i].Name = prefix + r.Models[i].Name - r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: ""}) + r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: tagValue}) } for i := range r.Experiments { @@ -97,12 +103,21 @@ func transformDevelopmentMode(b *bundle.Bundle) error { // (registered models in Unity Catalog don't yet support tags) } + for i := range r.QualityMonitors { + // Remove all schedules from monitors, since they don't support pausing/unpausing. + // Quality monitors might support the "pause" property in the future, so at the + // CLI level we do respect that property if it is set to "unpaused". + if r.QualityMonitors[i].Schedule != nil && r.QualityMonitors[i].Schedule.PauseStatus != catalog.MonitorCronSchedulePauseStatusUnpaused { + r.QualityMonitors[i].Schedule = nil + } + } + return nil } -func validateDevelopmentMode(b *bundle.Bundle) error { +func validateDevelopmentMode(b *bundle.Bundle) diag.Diagnostics { if path := findNonUserPath(b); path != "" { - return fmt.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path) + return diag.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path) } return nil } @@ -125,7 +140,7 @@ func findNonUserPath(b *bundle.Bundle) string { return "" } -func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error { +func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) diag.Diagnostics { if b.Config.Bundle.Git.Inferred { env := b.Config.Bundle.Target log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env) @@ -134,12 +149,12 @@ func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUs r := b.Config.Resources for i := range r.Pipelines { if r.Pipelines[i].Development { - return fmt.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'") + return diag.Errorf("target with 'mode: production' cannot include a pipeline with 'development: true'") } } if !isPrincipalUsed && !isRunAsSet(r) { - return fmt.Errorf("'run_as' must be set for all jobs when using 'mode: production'") + return diag.Errorf("'run_as' must be set for all jobs when using 'mode: production'") } return nil } @@ -156,21 +171,21 @@ func isRunAsSet(r config.Resources) bool { return true } -func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { switch b.Config.Bundle.Mode { case config.Development: - err := validateDevelopmentMode(b) - if err != nil { - return err + diags := validateDevelopmentMode(b) + if diags != nil { + return diags } - return transformDevelopmentMode(b) + return transformDevelopmentMode(ctx, b) 
case config.Production: isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.UserName) return validateProductionMode(ctx, b, isPrincipal) case "": // No action default: - return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode) + return diag.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode) } return nil diff --git a/bundle/config/mutator/process_target_mode_test.go b/bundle/config/mutator/process_target_mode_test.go index f02d78865..03da64e77 100644 --- a/bundle/config/mutator/process_target_mode_test.go +++ b/bundle/config/mutator/process_target_mode_test.go @@ -97,6 +97,23 @@ func mockBundle(mode config.Mode) *bundle.Bundle { RegisteredModels: map[string]*resources.RegisteredModel{ "registeredmodel1": {CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{Name: "registeredmodel1"}}, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "qualityMonitor1": {CreateMonitor: &catalog.CreateMonitor{TableName: "qualityMonitor1"}}, + "qualityMonitor2": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor2", + Schedule: &catalog.MonitorCronSchedule{}, + }, + }, + "qualityMonitor3": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "qualityMonitor3", + Schedule: &catalog.MonitorCronSchedule{ + PauseStatus: catalog.MonitorCronSchedulePauseStatusUnpaused, + }, + }, + }, + }, }, }, // Use AWS implementation for testing. @@ -110,8 +127,8 @@ func TestProcessTargetModeDevelopment(t *testing.T) { b := mockBundle(config.Development) m := ProcessTargetMode() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) // Job 1 assert.Equal(t, "[dev lennart] job1", b.Config.Resources.Jobs["job1"].Name) @@ -138,12 +155,18 @@ func TestProcessTargetModeDevelopment(t *testing.T) { // Model 1 assert.Equal(t, "[dev lennart] model1", b.Config.Resources.Models["model1"].Name) + assert.Contains(t, b.Config.Resources.Models["model1"].Tags, ml.ModelTag{Key: "dev", Value: "lennart"}) // Model serving endpoint 1 assert.Equal(t, "dev_lennart_servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) // Registered model 1 assert.Equal(t, "dev_lennart_registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + + // Quality Monitor 1 + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) + assert.Nil(t, b.Config.Resources.QualityMonitors["qualityMonitor2"].Schedule) + assert.Equal(t, catalog.MonitorCronSchedulePauseStatusUnpaused, b.Config.Resources.QualityMonitors["qualityMonitor3"].Schedule.PauseStatus) } func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { @@ -153,8 +176,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAws(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := bundle.Apply(context.Background(), b, ProcessTargetMode()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + require.NoError(t, diags.Error()) // Assert that tag normalization took place. assert.Equal(t, "Hello world__", b.Config.Resources.Jobs["job1"].Tags["dev"]) @@ -167,8 +190,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForAzure(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" 
- err := bundle.Apply(context.Background(), b, ProcessTargetMode()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + require.NoError(t, diags.Error()) // Assert that tag normalization took place (Azure allows more characters than AWS). assert.Equal(t, "Héllö wörld?!", b.Config.Resources.Jobs["job1"].Tags["dev"]) @@ -181,8 +204,8 @@ func TestProcessTargetModeDevelopmentTagNormalizationForGcp(t *testing.T) { }) b.Config.Workspace.CurrentUser.ShortName = "Héllö wörld?!" - err := bundle.Apply(context.Background(), b, ProcessTargetMode()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ProcessTargetMode()) + require.NoError(t, diags.Error()) // Assert that tag normalization took place. assert.Equal(t, "Hello_world", b.Config.Resources.Jobs["job1"].Tags["dev"]) @@ -192,27 +215,28 @@ func TestProcessTargetModeDefault(t *testing.T) { b := mockBundle("") m := ProcessTargetMode() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) } func TestProcessTargetModeProduction(t *testing.T) { b := mockBundle(config.Production) - err := validateProductionMode(context.Background(), b, false) - require.ErrorContains(t, err, "run_as") + diags := validateProductionMode(context.Background(), b, false) + require.ErrorContains(t, diags.Error(), "run_as") b.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state" b.Config.Workspace.ArtifactPath = "/Shared/.bundle/x/y/artifacts" b.Config.Workspace.FilePath = "/Shared/.bundle/x/y/files" - err = validateProductionMode(context.Background(), b, false) - require.ErrorContains(t, err, "production") + diags = validateProductionMode(context.Background(), b, false) + require.ErrorContains(t, diags.Error(), "production") permissions := []resources.Permission{ { @@ -231,26 +255,27 @@ func TestProcessTargetModeProduction(t *testing.T) { b.Config.Resources.Models["model1"].Permissions = permissions b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions - err = validateProductionMode(context.Background(), b, false) - require.NoError(t, err) + diags = validateProductionMode(context.Background(), b, false) + require.NoError(t, diags.Error()) assert.Equal(t, "job1", b.Config.Resources.Jobs["job1"].Name) assert.Equal(t, "pipeline1", b.Config.Resources.Pipelines["pipeline1"].Name) assert.False(t, b.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development) assert.Equal(t, "servingendpoint1", b.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name) assert.Equal(t, "registeredmodel1", b.Config.Resources.RegisteredModels["registeredmodel1"].Name) + assert.Equal(t, "qualityMonitor1", b.Config.Resources.QualityMonitors["qualityMonitor1"].TableName) } func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) { b := mockBundle(config.Production) // Our target has all kinds of problems when not using service principals ... 
- err := validateProductionMode(context.Background(), b, false) - require.Error(t, err) + diags := validateProductionMode(context.Background(), b, false) + require.Error(t, diags.Error()) // ... but we're much less strict when a principal is used - err = validateProductionMode(context.Background(), b, true) - require.NoError(t, err) + diags = validateProductionMode(context.Background(), b, true) + require.NoError(t, diags.Error()) } // Make sure that we have test coverage for all resource types @@ -274,12 +299,12 @@ func TestAllResourcesMocked(t *testing.T) { // Make sure that we at least rename all resources func TestAllResourcesRenamed(t *testing.T) { b := mockBundle(config.Development) - resources := reflect.ValueOf(b.Config.Resources) m := ProcessTargetMode() - err := bundle.Apply(context.Background(), b, m) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + resources := reflect.ValueOf(b.Config.Resources) for i := 0; i < resources.NumField(); i++ { field := resources.Field(i) @@ -300,3 +325,23 @@ func TestAllResourcesRenamed(t *testing.T) { } } } + +func TestDisableLocking(t *testing.T) { + ctx := context.Background() + b := mockBundle(config.Development) + + err := bundle.Apply(ctx, b, ProcessTargetMode()) + require.Nil(t, err) + assert.False(t, b.Config.Bundle.Deployment.Lock.IsEnabled()) +} + +func TestDisableLockingDisabled(t *testing.T) { + ctx := context.Background() + b := mockBundle(config.Development) + explicitlyEnabled := true + b.Config.Bundle.Deployment.Lock.Enabled = &explicitlyEnabled + + err := bundle.Apply(ctx, b, ProcessTargetMode()) + require.Nil(t, err) + assert.True(t, b.Config.Bundle.Deployment.Lock.IsEnabled(), "Deployment lock should remain enabled in development mode when explicitly enabled") +} diff --git a/bundle/config/mutator/python/log_writer.go b/bundle/config/mutator/python/log_writer.go new file mode 100644 index 000000000..aa3db0571 --- /dev/null +++ b/bundle/config/mutator/python/log_writer.go @@ -0,0 +1,42 @@ +package python + +import ( + "bufio" + "bytes" + "context" + "io" + + "github.com/databricks/cli/libs/log" +) + +type logWriter struct { + ctx context.Context + prefix string + buf bytes.Buffer +} + +// newLogWriter creates a new io.Writer that writes to log with specified prefix. 
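// The Python mutator below uses it to forward the subprocess's output to the CLI debug log, line by line, e.g. newLogWriter(ctx, "stderr: ").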
+func newLogWriter(ctx context.Context, prefix string) io.Writer { + return &logWriter{ + ctx: ctx, + prefix: prefix, + } +} + +func (p *logWriter) Write(bytes []byte) (n int, err error) { + p.buf.Write(bytes) + + scanner := bufio.NewScanner(&p.buf) + + for scanner.Scan() { + line := scanner.Text() + + log.Debugf(p.ctx, "%s%s", p.prefix, line) + } + + remaining := p.buf.Bytes() + p.buf.Reset() + p.buf.Write(remaining) + + return len(bytes), nil +} diff --git a/bundle/config/mutator/python/python_diagnostics.go b/bundle/config/mutator/python/python_diagnostics.go new file mode 100644 index 000000000..b8efc9ef7 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics.go @@ -0,0 +1,97 @@ +package python + +import ( + "encoding/json" + "fmt" + "io" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type pythonDiagnostic struct { + Severity pythonSeverity `json:"severity"` + Summary string `json:"summary"` + Detail string `json:"detail,omitempty"` + Location pythonDiagnosticLocation `json:"location,omitempty"` + Path string `json:"path,omitempty"` +} + +type pythonDiagnosticLocation struct { + File string `json:"file"` + Line int `json:"line"` + Column int `json:"column"` +} + +type pythonSeverity = string + +const ( + pythonError pythonSeverity = "error" + pythonWarning pythonSeverity = "warning" +) + +// parsePythonDiagnostics parses diagnostics from the Python mutator. +// +// diagnostics file is newline-separated JSON objects with pythonDiagnostic structure. +func parsePythonDiagnostics(input io.Reader) (diag.Diagnostics, error) { + diags := diag.Diagnostics{} + decoder := json.NewDecoder(input) + + for decoder.More() { + var parsedLine pythonDiagnostic + + err := decoder.Decode(&parsedLine) + if err != nil { + return nil, fmt.Errorf("failed to parse diags: %s", err) + } + + severity, err := convertPythonSeverity(parsedLine.Severity) + if err != nil { + return nil, fmt.Errorf("failed to parse severity: %s", err) + } + + path, err := convertPythonPath(parsedLine.Path) + if err != nil { + return nil, fmt.Errorf("failed to parse path: %s", err) + } + + diag := diag.Diagnostic{ + Severity: severity, + Summary: parsedLine.Summary, + Detail: parsedLine.Detail, + Location: convertPythonLocation(parsedLine.Location), + Path: path, + } + + diags = diags.Append(diag) + } + + return diags, nil +} + +func convertPythonPath(path string) (dyn.Path, error) { + if path == "" { + return nil, nil + } + + return dyn.NewPathFromString(path) +} + +func convertPythonSeverity(severity pythonSeverity) (diag.Severity, error) { + switch severity { + case pythonError: + return diag.Error, nil + case pythonWarning: + return diag.Warning, nil + default: + return 0, fmt.Errorf("unexpected value: %s", severity) + } +} + +func convertPythonLocation(location pythonDiagnosticLocation) dyn.Location { + return dyn.Location{ + File: location.File, + Line: location.Line, + Column: location.Column, + } +} diff --git a/bundle/config/mutator/python/python_diagnostics_test.go b/bundle/config/mutator/python/python_diagnostics_test.go new file mode 100644 index 000000000..7b66e2537 --- /dev/null +++ b/bundle/config/mutator/python/python_diagnostics_test.go @@ -0,0 +1,107 @@ +package python + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestConvertPythonLocation(t *testing.T) { + location := convertPythonLocation(pythonDiagnosticLocation{ + File: 
"src/examples/file.py", + Line: 1, + Column: 2, + }) + + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, location) +} + +type parsePythonDiagnosticsTest struct { + name string + input string + expected diag.Diagnostics +} + +func TestParsePythonDiagnostics(t *testing.T) { + + testCases := []parsePythonDiagnosticsTest{ + { + name: "short error with location", + input: `{"severity": "error", "summary": "error summary", "location": {"file": "src/examples/file.py", "line": 1, "column": 2}}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Location: dyn.Location{ + File: "src/examples/file.py", + Line: 1, + Column: 2, + }, + }, + }, + }, + { + name: "short error with path", + input: `{"severity": "error", "summary": "error summary", "path": "resources.jobs.job0.name"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary", + Path: dyn.MustPathFromString("resources.jobs.job0.name"), + }, + }, + }, + { + name: "empty file", + input: "", + expected: diag.Diagnostics{}, + }, + { + name: "newline file", + input: "\n", + expected: diag.Diagnostics{}, + }, + { + name: "warning with detail", + input: `{"severity": "warning", "summary": "warning summary", "detail": "warning detail"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "warning summary", + Detail: "warning detail", + }, + }, + }, + { + name: "multiple errors", + input: `{"severity": "error", "summary": "error summary (1)"}` + "\n" + + `{"severity": "error", "summary": "error summary (2)"}`, + expected: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "error summary (1)", + }, + { + Severity: diag.Error, + Summary: "error summary (2)", + }, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + diags, err := parsePythonDiagnostics(bytes.NewReader([]byte(tc.input))) + + assert.NoError(t, err) + assert.Equal(t, tc.expected, diags) + }) + } +} diff --git a/bundle/config/mutator/python/python_mutator.go b/bundle/config/mutator/python/python_mutator.go new file mode 100644 index 000000000..f9febe5b5 --- /dev/null +++ b/bundle/config/mutator/python/python_mutator.go @@ -0,0 +1,433 @@ +package python + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "path/filepath" + "runtime" + + "github.com/databricks/databricks-sdk-go/logger" + + "github.com/databricks/cli/bundle/env" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/process" +) + +type phase string + +const ( + // PythonMutatorPhaseLoad is the phase in which bundle configuration is loaded. + // + // At this stage, PyDABs adds statically defined resources to the bundle configuration. + // Which resources are added should be deterministic and not depend on the bundle configuration. + // + // We also open for possibility of appending other sections of bundle configuration, + // for example, adding new variables. However, this is not supported yet, and CLI rejects + // such changes. + PythonMutatorPhaseLoad phase = "load" + + // PythonMutatorPhaseInit is the phase after bundle configuration was loaded, and + // the list of statically declared resources is known. 
+ // + // At this stage, PyDABs adds resources defined using generators, or mutates existing resources, + // including the ones defined using YAML. + // + // During this process, within generator and mutators, PyDABs can access: + // - selected deployment target + // - bundle variables values + // - variables provided through CLI arguments or environment variables + // + // The following is not available: + // - variables referencing other variables are in unresolved format + // + // PyDABs can output YAML containing references to variables, and CLI should resolve them. + // + // Existing resources can't be removed, and CLI rejects such changes. + PythonMutatorPhaseInit phase = "init" +) + +type pythonMutator struct { + phase phase +} + +func PythonMutator(phase phase) bundle.Mutator { + return &pythonMutator{ + phase: phase, + } +} + +func (m *pythonMutator) Name() string { + return fmt.Sprintf("PythonMutator(%s)", m.phase) +} + +func getExperimental(b *bundle.Bundle) config.Experimental { + if b.Config.Experimental == nil { + return config.Experimental{} + } + + return *b.Config.Experimental +} + +func (m *pythonMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + experimental := getExperimental(b) + + if !experimental.PyDABs.Enabled { + return nil + } + + if experimental.PyDABs.VEnvPath == "" { + return diag.Errorf("\"experimental.pydabs.enabled\" can only be used when \"experimental.pydabs.venv_path\" is set") + } + + // mutateDiags is used because Mutate returns 'error' instead of 'diag.Diagnostics' + var mutateDiags diag.Diagnostics + var mutateDiagsHasError = errors.New("unexpected error") + + err := b.Config.Mutate(func(leftRoot dyn.Value) (dyn.Value, error) { + pythonPath := interpreterPath(experimental.PyDABs.VEnvPath) + + if _, err := os.Stat(pythonPath); err != nil { + if os.IsNotExist(err) { + return dyn.InvalidValue, fmt.Errorf("can't find %q, check if venv is created", pythonPath) + } else { + return dyn.InvalidValue, fmt.Errorf("can't find %q: %w", pythonPath, err) + } + } + + cacheDir, err := createCacheDir(ctx) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to create cache dir: %w", err) + } + + rightRoot, diags := m.runPythonMutator(ctx, cacheDir, b.RootPath, pythonPath, leftRoot) + mutateDiags = diags + if diags.HasError() { + return dyn.InvalidValue, mutateDiagsHasError + } + + visitor, err := createOverrideVisitor(ctx, m.phase) + if err != nil { + return dyn.InvalidValue, err + } + + return merge.Override(leftRoot, rightRoot, visitor) + }) + + if err == mutateDiagsHasError { + if !mutateDiags.HasError() { + panic("mutateDiags has no error, but error is expected") + } + + return mutateDiags + } + + return mutateDiags.Extend(diag.FromErr(err)) +} + +func createCacheDir(ctx context.Context) (string, error) { + // b.CacheDir doesn't work because target isn't yet selected + + // support the same env variable as in b.CacheDir + if tempDir, exists := env.TempDir(ctx); exists { + // use 'default' as target name + cacheDir := filepath.Join(tempDir, "default", "pydabs") + + err := os.MkdirAll(cacheDir, 0700) + if err != nil { + return "", err + } + + return cacheDir, nil + } + + return os.MkdirTemp("", "-pydabs") +} + +func (m *pythonMutator) runPythonMutator(ctx context.Context, cacheDir string, rootPath string, pythonPath string, root dyn.Value) (dyn.Value, diag.Diagnostics) { + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, 
"diagnostics.json") + + args := []string{ + pythonPath, + "-m", + "databricks.bundles.build", + "--phase", + string(m.phase), + "--input", + inputPath, + "--output", + outputPath, + "--diagnostics", + diagnosticsPath, + } + + if err := writeInputFile(inputPath, root); err != nil { + return dyn.InvalidValue, diag.Errorf("failed to write input file: %s", err) + } + + stderrWriter := newLogWriter(ctx, "stderr: ") + stdoutWriter := newLogWriter(ctx, "stdout: ") + + _, processErr := process.Background( + ctx, + args, + process.WithDir(rootPath), + process.WithStderrWriter(stderrWriter), + process.WithStdoutWriter(stdoutWriter), + ) + if processErr != nil { + logger.Debugf(ctx, "python mutator process failed: %s", processErr) + } + + pythonDiagnostics, pythonDiagnosticsErr := loadDiagnosticsFile(diagnosticsPath) + if pythonDiagnosticsErr != nil { + logger.Debugf(ctx, "failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + // if diagnostics file exists, it gives the most descriptive errors + // if there is any error, we treat it as fatal error, and stop processing + if pythonDiagnostics.HasError() { + return dyn.InvalidValue, pythonDiagnostics + } + + // process can fail without reporting errors in diagnostics file or creating it, for instance, + // venv doesn't have PyDABs library installed + if processErr != nil { + return dyn.InvalidValue, diag.Errorf("python mutator process failed: %sw, use --debug to enable logging", processErr) + } + + // or we can fail to read diagnostics file, that should always be created + if pythonDiagnosticsErr != nil { + return dyn.InvalidValue, diag.Errorf("failed to load diagnostics: %s", pythonDiagnosticsErr) + } + + output, err := loadOutputFile(rootPath, outputPath) + if err != nil { + return dyn.InvalidValue, diag.Errorf("failed to load Python mutator output: %s", err) + } + + // we pass through pythonDiagnostic because it contains warnings + return output, pythonDiagnostics +} + +func writeInputFile(inputPath string, input dyn.Value) error { + // we need to marshal dyn.Value instead of bundle.Config to JSON to support + // non-string fields assigned with bundle variables + rootConfigJson, err := json.Marshal(input.AsAny()) + if err != nil { + return fmt.Errorf("failed to marshal input: %w", err) + } + + return os.WriteFile(inputPath, rootConfigJson, 0600) +} + +func loadOutputFile(rootPath string, outputPath string) (dyn.Value, error) { + outputFile, err := os.Open(outputPath) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to open output file: %w", err) + } + + defer outputFile.Close() + + // we need absolute path because later parts of pipeline assume all paths are absolute + // and this file will be used as location to resolve relative paths. 
+ // + // virtualPath has to stay in rootPath, because locations outside root path are not allowed: + // + // Error: path /var/folders/.../pydabs/dist/*.whl is not contained in bundle root path + // + // for that, we pass virtualPath instead of outputPath as file location + virtualPath, err := filepath.Abs(filepath.Join(rootPath, "__generated_by_pydabs__.yml")) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to get absolute path: %w", err) + } + + generated, err := yamlloader.LoadYAML(virtualPath, outputFile) + if err != nil { + return dyn.InvalidValue, fmt.Errorf("failed to parse output file: %w", err) + } + + normalized, diagnostic := convert.Normalize(config.Root{}, generated) + if diagnostic.Error() != nil { + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %w", diagnostic.Error()) + } + + // warnings shouldn't happen because output should be already normalized + // when it happens, it's a bug in the mutator, and should be treated as an error + + for _, d := range diagnostic.Filter(diag.Warning) { + return dyn.InvalidValue, fmt.Errorf("failed to normalize output: %s", d.Summary) + } + + return normalized, nil +} + +// loadDiagnosticsFile loads diagnostics from a file. +// +// It contains a list of warnings and errors that we should print to users. +// +// If the file doesn't exist, we return an error. We expect the file to always be +// created by the Python mutator, and its absence means there are integration problems, +// and the diagnostics file was lost. If we treat non-existence as an empty diag.Diagnostics +// we risk losing errors and warnings. +func loadDiagnosticsFile(path string) (diag.Diagnostics, error) { + file, err := os.Open(path) + if err != nil { + return nil, fmt.Errorf("failed to open diagnostics file: %w", err) + } + + defer file.Close() + + return parsePythonDiagnostics(file) +} + +func createOverrideVisitor(ctx context.Context, phase phase) (merge.OverrideVisitor, error) { + switch phase { + case PythonMutatorPhaseLoad: + return createLoadOverrideVisitor(ctx), nil + case PythonMutatorPhaseInit: + return createInitOverrideVisitor(ctx), nil + default: + return merge.OverrideVisitor{}, fmt.Errorf("unknown phase: %s", phase) + } +} + +// createLoadOverrideVisitor creates an override visitor for the load phase. +// +// During load, it's only possible to create new resources, and not modify or +// delete existing ones.
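// For example, output that adds a new entry under resources.jobs is accepted, while output that updates a field of an existing job or deletes a job is rejected with an "unexpected change at ..." error.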
+func createLoadOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + insertResource := len(valuePath) == len(jobsPath)+1 + + // adding a property into an existing resource is not allowed, because it changes it + if !insertResource { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + }, + } +} + +// createInitOverrideVisitor creates an override visitor for the init phase. +// +// During the init phase it's possible to create new resources, modify existing +// resources, but not delete existing resources. +func createInitOverrideVisitor(ctx context.Context) merge.OverrideVisitor { + resourcesPath := dyn.NewPath(dyn.Key("resources")) + jobsPath := dyn.NewPath(dyn.Key("resources"), dyn.Key("jobs")) + + return merge.OverrideVisitor{ + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + if isOmitemptyDelete(left) { + return merge.ErrOverrideUndoDelete + } + + if !valuePath.HasPrefix(jobsPath) { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + deleteResource := len(valuePath) == len(jobsPath)+1 + + if deleteResource { + return fmt.Errorf("unexpected change at %q (delete)", valuePath.String()) + } + + // deleting properties is allowed because it only changes an existing resource + log.Debugf(ctx, "Delete value at %q", valuePath.String()) + + return nil + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + // insert 'resources' or 'resources.jobs' if it didn't exist before + if valuePath.Equal(resourcesPath) || valuePath.Equal(jobsPath) { + return right, nil + } + + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (insert)", valuePath.String()) + } + + log.Debugf(ctx, "Insert value at %q", valuePath.String()) + + return right, nil + }, + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + if !valuePath.HasPrefix(jobsPath) { + return dyn.InvalidValue, fmt.Errorf("unexpected change at %q (update)", valuePath.String()) + } + + log.Debugf(ctx, "Update value at %q", valuePath.String()) + + return right, nil + }, + } +} + +func isOmitemptyDelete(left dyn.Value) bool { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. 
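// For example, if the input configuration contained an empty mapping or sequence (say, an empty `tags: {}`) and the PyDABs output omits it, the delete is treated as a no-op and the original empty value is kept.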
+ + switch left.Kind() { + case dyn.KindMap: + return left.MustMap().Len() == 0 + + case dyn.KindSequence: + return len(left.MustSequence()) == 0 + + case dyn.KindNil: + // map/sequence can be nil, for instance, bad YAML like: `foo:` + return true + + default: + return false + } +} + +// interpreterPath returns platform-specific path to Python interpreter in the virtual environment. +func interpreterPath(venvPath string) string { + if runtime.GOOS == "windows" { + return filepath.Join(venvPath, "Scripts", "python3.exe") + } else { + return filepath.Join(venvPath, "bin", "python3") + } +} diff --git a/bundle/config/mutator/python/python_mutator_test.go b/bundle/config/mutator/python/python_mutator_test.go new file mode 100644 index 000000000..588589831 --- /dev/null +++ b/bundle/config/mutator/python/python_mutator_test.go @@ -0,0 +1,623 @@ +package python + +import ( + "context" + "fmt" + "os" + "os/exec" + "path/filepath" + "reflect" + "runtime" + "testing" + + "github.com/databricks/cli/libs/dyn/merge" + + "github.com/databricks/cli/bundle/env" + "github.com/stretchr/testify/require" + + "golang.org/x/exp/maps" + + "github.com/databricks/cli/libs/dyn" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/libs/process" +) + +func TestPythonMutator_Name_load(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseLoad) + + assert.Equal(t, "PythonMutator(load)", mutator.Name()) +} + +func TestPythonMutator_Name_init(t *testing.T) { + mutator := PythonMutator(PythonMutatorPhaseInit) + + assert.Equal(t, "PythonMutator(init)", mutator.Name()) +} + +func TestPythonMutator_load(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0" + }, + "job1": { + name: "job_1" + }, + } + } + }`, + `{"severity": "warning", "summary": "job doesn't have any tasks", "location": {"file": "src/examples/file.py", "line": 10, "column": 5}}`, + ) + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diags := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diags.Error()) + + assert.ElementsMatch(t, []string{"job0", "job1"}, maps.Keys(b.Config.Resources.Jobs)) + + if job0, ok := b.Config.Resources.Jobs["job0"]; ok { + assert.Equal(t, "job_0", job0.Name) + } + + if job1, ok := b.Config.Resources.Jobs["job1"]; ok { + assert.Equal(t, "job_1", job1.Name) + } + + assert.Equal(t, 1, len(diags)) + assert.Equal(t, "job doesn't have any tasks", diags[0].Summary) + assert.Equal(t, dyn.Location{ + File: "src/examples/file.py", + Line: 10, + Column: 5, + }, diags[0].Location) +} + +func TestPythonMutator_load_disallowed(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: 
"job description" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "unexpected change at \"resources.jobs.job0.description\" (insert)") +} + +func TestPythonMutator_init(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "init", + }, + `{ + "experimental": { + "pydabs": { + "enabled": true, + "venv_path": ".venv" + } + }, + "resources": { + "jobs": { + "job0": { + name: "job_0", + description: "my job" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseInit) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) + + assert.ElementsMatch(t, []string{"job0"}, maps.Keys(b.Config.Resources.Jobs)) + assert.Equal(t, "job_0", b.Config.Resources.Jobs["job0"].Name) + assert.Equal(t, "my job", b.Config.Resources.Jobs["job0"].Description) + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + // 'name' wasn't changed, so it keeps its location + name, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.name")) + require.NoError(t, err) + assert.Equal(t, "databricks.yml", name.Location().File) + + // 'description' was updated by PyDABs and has location of generated file until + // we implement source maps + description, err := dyn.GetByPath(v, dyn.MustPathFromString("resources.jobs.job0.description")) + require.NoError(t, err) + + expectedVirtualPath, err := filepath.Abs("__generated_by_pydabs__.yml") + require.NoError(t, err) + assert.Equal(t, expectedVirtualPath, description.Location().File) + + return v, nil + }) + assert.NoError(t, err) +} + +func TestPythonMutator_badOutput(t *testing.T) { + withFakeVEnv(t, ".venv") + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: .venv + resources: + jobs: + job0: + name: job_0`) + + ctx := withProcessStub( + t, + []string{ + interpreterPath(".venv"), + "-m", + "databricks.bundles.build", + "--phase", + "load", + }, + `{ + "resources": { + "jobs": { + "job0": { + unknown_property: "my job" + } + } + } + }`, "") + + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.EqualError(t, diag.Error(), "failed to load Python mutator output: failed to normalize output: unknown field: unknown_property") +} + +func TestPythonMutator_disabled(t *testing.T) { + b := loadYaml("databricks.yml", ``) + + ctx := context.Background() + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.NoError(t, diag.Error()) +} + +func TestPythonMutator_venvRequired(t *testing.T) { + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true`) + + ctx := context.Background() + mutator := PythonMutator(PythonMutatorPhaseLoad) + diag := bundle.Apply(ctx, b, mutator) + + assert.Error(t, diag.Error(), "\"experimental.enable_pydabs\" is enabled, but \"experimental.venv.path\" is not set") +} + +func TestPythonMutator_venvNotFound(t *testing.T) { + expectedError := fmt.Sprintf("can't find %q, check if venv is created", interpreterPath("bad_path")) + + b := loadYaml("databricks.yml", ` + experimental: + pydabs: + enabled: true + venv_path: bad_path`) + + mutator := PythonMutator(PythonMutatorPhaseInit) + diag := 
bundle.Apply(context.Background(), b, mutator) + + assert.EqualError(t, diag.Error(), expectedError) +} + +type createOverrideVisitorTestCase struct { + name string + updatePath dyn.Path + deletePath dyn.Path + insertPath dyn.Path + phase phase + updateError error + deleteError error + insertError error +} + +func TestCreateOverrideVisitor(t *testing.T) { + left := dyn.V(42) + right := dyn.V(1337) + + testCases := []createOverrideVisitorTestCase{ + { + name: "load: can't change an existing job", + phase: PythonMutatorPhaseLoad, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"), + }, + { + name: "load: can't delete an existing job", + phase: PythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "load: can insert 'resources'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "load: can insert 'resources.jobs'", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "load: can insert a job", + phase: PythonMutatorPhaseLoad, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "load: can't change include", + phase: PythonMutatorPhaseLoad, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), + insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + { + name: "init: can change an existing job", + phase: PythonMutatorPhaseInit, + updatePath: dyn.MustPathFromString("resources.jobs.job0.name"), + deletePath: dyn.MustPathFromString("resources.jobs.job0.name"), + insertPath: dyn.MustPathFromString("resources.jobs.job0.name"), + deleteError: nil, + insertError: nil, + updateError: nil, + }, + { + name: "init: can't delete an existing job", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("resources.jobs.job0"), + deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"), + }, + { + name: "init: can insert 'resources'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources"), + insertError: nil, + }, + { + name: "init: can insert 'resources.jobs'", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs"), + insertError: nil, + }, + { + name: "init: can insert a job", + phase: PythonMutatorPhaseInit, + insertPath: dyn.MustPathFromString("resources.jobs.job0"), + insertError: nil, + }, + { + name: "init: can't change include", + phase: PythonMutatorPhaseInit, + deletePath: dyn.MustPathFromString("include[0]"), + insertPath: dyn.MustPathFromString("include[0]"), + updatePath: dyn.MustPathFromString("include[0]"), + deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"), + insertError: 
fmt.Errorf("unexpected change at \"include[0]\" (insert)"), + updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"), + }, + } + + for _, tc := range testCases { + visitor, err := createOverrideVisitor(context.Background(), tc.phase) + if err != nil { + t.Fatalf("create visitor failed: %v", err) + } + + if tc.updatePath != nil { + t.Run(tc.name+"-update", func(t *testing.T) { + out, err := visitor.VisitUpdate(tc.updatePath, left, right) + + if tc.updateError != nil { + assert.Equal(t, tc.updateError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + + if tc.deletePath != nil { + t.Run(tc.name+"-delete", func(t *testing.T) { + err := visitor.VisitDelete(tc.deletePath, left) + + if tc.deleteError != nil { + assert.Equal(t, tc.deleteError, err) + } else { + assert.NoError(t, err) + } + }) + } + + if tc.insertPath != nil { + t.Run(tc.name+"-insert", func(t *testing.T) { + out, err := visitor.VisitInsert(tc.insertPath, right) + + if tc.insertError != nil { + assert.Equal(t, tc.insertError, err) + } else { + assert.NoError(t, err) + assert.Equal(t, right, out) + } + }) + } + } +} + +type overrideVisitorOmitemptyTestCase struct { + name string + path dyn.Path + left dyn.Value + phases []phase + expectedErr error +} + +func TestCreateOverrideVisitor_omitempty(t *testing.T) { + // PyDABs can omit empty sequences/mappings in output, because we don't track them as optional, + // there is no semantic difference between empty and missing, so we keep them as they were before + // PyDABs deleted them. + + allPhases := []phase{PythonMutatorPhaseLoad, PythonMutatorPhaseInit} + location := dyn.Location{ + File: "databricks.yml", + Line: 10, + Column: 20, + } + + testCases := []overrideVisitorOmitemptyTestCase{ + { + // this is not happening, but adding for completeness + name: "undo delete of empty variables", + path: dyn.MustPathFromString("variables"), + left: dyn.NewValue([]dyn.Value{}, []dyn.Location{location}), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "undo delete of empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{}, []dyn.Location{location}), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty job clusters", + path: dyn.MustPathFromString("resources.jobs.job0.job_clusters"), + left: dyn.NewValue([]dyn.Value{dyn.NewValue("abc", []dyn.Location{location})}, []dyn.Location{location}), + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue(map[string]dyn.Value{}, []dyn.Location{location}), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + { + name: "allow delete of non-empty tags", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NewValue(map[string]dyn.Value{"dev": dyn.NewValue("true", []dyn.Location{location})}, []dyn.Location{location}), + + expectedErr: nil, + // deletions aren't allowed in 'load' phase + phases: []phase{PythonMutatorPhaseInit}, + }, + { + name: "undo delete of nil", + path: dyn.MustPathFromString("resources.jobs.job0.tags"), + left: dyn.NilValue.WithLocations([]dyn.Location{location}), + expectedErr: merge.ErrOverrideUndoDelete, + phases: allPhases, + }, + } + + for _, tc := range testCases { + for _, phase := range tc.phases { + 
t.Run(tc.name+"-"+string(phase), func(t *testing.T) { + visitor, err := createOverrideVisitor(context.Background(), phase) + require.NoError(t, err) + + err = visitor.VisitDelete(tc.path, tc.left) + + assert.Equal(t, tc.expectedErr, err) + }) + } + } +} + +func TestLoadDiagnosticsFile_nonExistent(t *testing.T) { + // this is an important behaviour, see loadDiagnosticsFile docstring + _, err := loadDiagnosticsFile("non_existent_file.json") + + assert.Error(t, err) +} + +func TestInterpreterPath(t *testing.T) { + if runtime.GOOS == "windows" { + assert.Equal(t, "venv\\Scripts\\python3.exe", interpreterPath("venv")) + } else { + assert.Equal(t, "venv/bin/python3", interpreterPath("venv")) + } +} + +func withProcessStub(t *testing.T, args []string, output string, diagnostics string) context.Context { + ctx := context.Background() + ctx, stub := process.WithStub(ctx) + + t.Setenv(env.TempDirVariable, t.TempDir()) + + // after we override env variable, we always get the same cache dir as mutator + cacheDir, err := createCacheDir(ctx) + require.NoError(t, err) + + inputPath := filepath.Join(cacheDir, "input.json") + outputPath := filepath.Join(cacheDir, "output.json") + diagnosticsPath := filepath.Join(cacheDir, "diagnostics.json") + + args = append(args, "--input", inputPath) + args = append(args, "--output", outputPath) + args = append(args, "--diagnostics", diagnosticsPath) + + stub.WithCallback(func(actual *exec.Cmd) error { + _, err := os.Stat(inputPath) + assert.NoError(t, err) + + if reflect.DeepEqual(actual.Args, args) { + err := os.WriteFile(outputPath, []byte(output), 0600) + require.NoError(t, err) + + err = os.WriteFile(diagnosticsPath, []byte(diagnostics), 0600) + require.NoError(t, err) + + return nil + } else { + return fmt.Errorf("unexpected command: %v", actual.Args) + } + }) + + return ctx +} + +func loadYaml(name string, content string) *bundle.Bundle { + v, diag := config.LoadFromBytes(name, []byte(content)) + + if diag.Error() != nil { + panic(diag.Error()) + } + + return &bundle.Bundle{ + Config: *v, + } +} + +func withFakeVEnv(t *testing.T, path string) { + interpreterPath := interpreterPath(path) + + cwd, err := os.Getwd() + if err != nil { + panic(err) + } + + if err := os.Chdir(t.TempDir()); err != nil { + panic(err) + } + + err = os.MkdirAll(filepath.Dir(interpreterPath), 0755) + if err != nil { + panic(err) + } + + err = os.WriteFile(interpreterPath, []byte(""), 0755) + if err != nil { + panic(err) + } + + t.Cleanup(func() { + if err := os.Chdir(cwd); err != nil { + panic(err) + } + }) +} diff --git a/bundle/config/mutator/resolve_resource_references.go b/bundle/config/mutator/resolve_resource_references.go index 7a7462ab9..89eaa346c 100644 --- a/bundle/config/mutator/resolve_resource_references.go +++ b/bundle/config/mutator/resolve_resource_references.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "golang.org/x/sync/errgroup" ) @@ -15,7 +16,7 @@ func ResolveResourceReferences() bundle.Mutator { return &resolveResourceReferences{} } -func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { errs, errCtx := errgroup.WithContext(ctx) for k := range b.Config.Variables { @@ -40,7 +41,7 @@ func (m *resolveResourceReferences) Apply(ctx context.Context, b *bundle.Bundle) }) } - return errs.Wait() + return diag.FromErr(errs.Wait()) } func 
(*resolveResourceReferences) Name() string { diff --git a/bundle/config/mutator/resolve_resource_references_test.go b/bundle/config/mutator/resolve_resource_references_test.go index 4d51285c6..86a03b23e 100644 --- a/bundle/config/mutator/resolve_resource_references_test.go +++ b/bundle/config/mutator/resolve_resource_references_test.go @@ -8,11 +8,13 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/env" "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/iam" ) func TestResolveClusterReference(t *testing.T) { @@ -33,7 +35,7 @@ func TestResolveClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -49,10 +51,10 @@ func TestResolveClusterReference(t *testing.T) { ClusterId: "9876-5432-xywz", }, nil) - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.NoError(t, err) - require.Equal(t, "1234-5678-abcd", *b.Config.Variables["my-cluster-id-1"].Value) - require.Equal(t, "9876-5432-xywz", *b.Config.Variables["my-cluster-id-2"].Value) + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["my-cluster-id-1"].Value) + require.Equal(t, "9876-5432-xywz", b.Config.Variables["my-cluster-id-2"].Value) } func TestResolveNonExistentClusterReference(t *testing.T) { @@ -67,7 +69,7 @@ func TestResolveNonExistentClusterReference(t *testing.T) { }, }, "some-variable": { - Value: &justString, + Value: justString, }, }, }, @@ -78,8 +80,8 @@ func TestResolveNonExistentClusterReference(t *testing.T) { clusterApi := m.GetMockClustersAPI() clusterApi.EXPECT().GetByClusterName(mock.Anything, clusterRef).Return(nil, fmt.Errorf("ClusterDetails named '%s' does not exist", clusterRef)) - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.ErrorContains(t, err, "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist") + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.ErrorContains(t, diags.Error(), "failed to resolve cluster: Random, err: ClusterDetails named 'Random' does not exist") } func TestNoLookupIfVariableIsSet(t *testing.T) { @@ -101,7 +103,119 @@ func TestNoLookupIfVariableIsSet(t *testing.T) { b.Config.Variables["my-cluster-id"].Set("random value") - err := bundle.Apply(context.Background(), b, ResolveResourceReferences()) - require.NoError(t, err) - require.Equal(t, "random value", *b.Config.Variables["my-cluster-id"].Value) + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) + require.Equal(t, "random value", b.Config.Variables["my-cluster-id"].Value) +} + +func TestResolveServicePrincipal(t *testing.T) { + spName := "Some SP name" + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "my-sp": { + Lookup: &variable.Lookup{ + ServicePrincipal: spName, + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + spApi := m.GetMockServicePrincipalsAPI() + spApi.EXPECT().GetByDisplayName(mock.Anything, spName).Return(&iam.ServicePrincipal{ + Id: "1234", + 
ApplicationId: "app-1234", + }, nil) + + diags := bundle.Apply(context.Background(), b, ResolveResourceReferences()) + require.NoError(t, diags.Error()) + require.Equal(t, "app-1234", b.Config.Variables["my-sp"].Value) +} + +func TestResolveVariableReferencesInVariableLookups(t *testing.T) { + s := "bar" + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "dev", + }, + Variables: map[string]*variable.Variable{ + "foo": { + Value: s, + }, + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${var.foo}-${bundle.target}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + clusterApi := m.GetMockClustersAPI() + clusterApi.EXPECT().GetByClusterName(mock.Anything, "cluster-bar-dev").Return(&compute.ClusterDetails{ + ClusterId: "1234-5678-abcd", + }, nil) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.NoError(t, diags.Error()) + require.Equal(t, "cluster-bar-dev", b.Config.Variables["lookup"].Lookup.Cluster) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) +} + +func TestResolveLookupVariableReferencesInVariableLookups(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "another_lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster", + }, + }, + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${var.another_lookup}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + diags := bundle.Apply(context.Background(), b, bundle.Seq(ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.ErrorContains(t, diags.Error(), "lookup variables cannot contain references to another lookup variables") +} + +func TestNoResolveLookupIfVariableSetWithEnvVariable(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Target: "dev", + }, + Variables: map[string]*variable.Variable{ + "lookup": { + Lookup: &variable.Lookup{ + Cluster: "cluster-${bundle.target}", + }, + }, + }, + }, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + ctx = env.Set(ctx, "BUNDLE_VAR_lookup", "1234-5678-abcd") + + diags := bundle.Apply(ctx, b, bundle.Seq(SetVariables(), ResolveVariableReferencesInLookup(), ResolveResourceReferences())) + require.NoError(t, diags.Error()) + require.Equal(t, "1234-5678-abcd", b.Config.Variables["lookup"].Value) } diff --git a/bundle/config/mutator/resolve_variable_references.go b/bundle/config/mutator/resolve_variable_references.go new file mode 100644 index 000000000..61940be56 --- /dev/null +++ b/bundle/config/mutator/resolve_variable_references.go @@ -0,0 +1,193 @@ +package mutator + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/dynvar" + "github.com/databricks/cli/libs/log" +) + +type resolveVariableReferences struct { + prefixes []string + pattern dyn.Pattern + lookupFn func(dyn.Value, dyn.Path) (dyn.Value, error) + skipFn func(dyn.Value) bool +} + +func ResolveVariableReferences(prefixes ...string) bundle.Mutator { + return &resolveVariableReferences{prefixes: prefixes, lookupFn: lookup} +} + +func 
ResolveVariableReferencesInLookup() bundle.Mutator { + return &resolveVariableReferences{prefixes: []string{ + "bundle", + "workspace", + "variables", + }, pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("lookup")), lookupFn: lookupForVariables} +} + +func ResolveVariableReferencesInComplexVariables() bundle.Mutator { + return &resolveVariableReferences{prefixes: []string{ + "bundle", + "workspace", + "variables", + }, + pattern: dyn.NewPattern(dyn.Key("variables"), dyn.AnyKey(), dyn.Key("value")), + lookupFn: lookupForComplexVariables, + skipFn: skipResolvingInNonComplexVariables, + } +} + +func lookup(v dyn.Value, path dyn.Path) (dyn.Value, error) { + // Future opportunity: if we lookup this path in both the given root + // and the synthesized root, we know if it was explicitly set or implied to be empty. + // Then we can emit a warning if it was not explicitly set. + return dyn.GetByPath(v, path) +} + +func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { + if path[0].Key() != "variables" { + return lookup(v, path) + } + + varV, err := dyn.GetByPath(v, path[:len(path)-1]) + if err != nil { + return dyn.InvalidValue, err + } + + var vv variable.Variable + err = convert.ToTyped(&vv, varV) + if err != nil { + return dyn.InvalidValue, err + } + + if vv.Type == variable.VariableTypeComplex { + return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables") + } + + return lookup(v, path) +} + +func skipResolvingInNonComplexVariables(v dyn.Value) bool { + switch v.Kind() { + case dyn.KindMap, dyn.KindSequence: + return false + default: + return true + } +} + +func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) { + if path[0].Key() != "variables" { + return lookup(v, path) + } + + varV, err := dyn.GetByPath(v, path[:len(path)-1]) + if err != nil { + return dyn.InvalidValue, err + } + + var vv variable.Variable + err = convert.ToTyped(&vv, varV) + if err != nil { + return dyn.InvalidValue, err + } + + if vv.Lookup != nil && vv.Lookup.String() != "" { + return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables") + } + + return lookup(v, path) +} + +func (*resolveVariableReferences) Name() string { + return "ResolveVariableReferences" +} + +func (m *resolveVariableReferences) Validate(ctx context.Context, b *bundle.Bundle) error { + return nil +} + +func (m *resolveVariableReferences) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + prefixes := make([]dyn.Path, len(m.prefixes)) + for i, prefix := range m.prefixes { + prefixes[i] = dyn.MustPathFromString(prefix) + } + + // The path ${var.foo} is a shorthand for ${variables.foo.value}. + // We rewrite it here to make the resolution logic simpler. + varPath := dyn.NewPath(dyn.Key("var")) + + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + // Synthesize a copy of the root that has all fields that are present in the type + // but not set in the dynamic value set to their corresponding empty value. + // This enables users to interpolate variable references to fields that haven't + // been explicitly set in the dynamic value. + // + // For example: ${bundle.git.origin_url} should resolve to an empty string + // if a bundle isn't located in a Git repository (yet). + // + // This is consistent with the behavior prior to using the dynamic value system. 
+ // + // We can ignore the diagnostics return value because we know that the dynamic value + // has already been normalized when it was first loaded from the configuration file. + // + normalized, _ := convert.Normalize(b.Config, root, convert.IncludeMissingFields) + + // If the pattern is nil, we resolve references in the entire configuration. + root, err := dyn.MapByPattern(root, m.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // Resolve variable references in all values. + return dynvar.Resolve(v, func(path dyn.Path) (dyn.Value, error) { + // Rewrite the shorthand path ${var.foo} into ${variables.foo.value}. + if path.HasPrefix(varPath) { + newPath := dyn.NewPath( + dyn.Key("variables"), + path[1], + dyn.Key("value"), + ) + + if len(path) > 2 { + newPath = newPath.Append(path[2:]...) + } + + path = newPath + } + + // Perform resolution only if the path starts with one of the specified prefixes. + for _, prefix := range prefixes { + if path.HasPrefix(prefix) { + // Skip resolution if there is a skip function and it returns true. + if m.skipFn != nil && m.skipFn(v) { + return dyn.InvalidValue, dynvar.ErrSkipResolution + } + return m.lookupFn(normalized, path) + } + } + + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + }) + + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the result because variable resolution may have been applied to non-string fields. + // For example, a variable reference may have been resolved to a integer. + root, diags := convert.Normalize(b.Config, root) + for _, diag := range diags { + // This occurs when a variable's resolved value is incompatible with the field's type. + // Log a warning until we have a better way to surface these diagnostics to the user. + log.Warnf(ctx, "normalization diagnostic: %s", diag.Summary) + } + return root, nil + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/resolve_variable_references_test.go b/bundle/config/mutator/resolve_variable_references_test.go new file mode 100644 index 000000000..7bb6f11a0 --- /dev/null +++ b/bundle/config/mutator/resolve_variable_references_test.go @@ -0,0 +1,436 @@ +package mutator + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestResolveVariableReferences(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Workspace: config.Workspace{ + RootPath: "${bundle.name}/bar", + FilePath: "${workspace.root_path}/baz", + }, + }, + } + + // Apply with an invalid prefix. This should not change the workspace root path. + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("doesntexist")) + require.NoError(t, diags.Error()) + require.Equal(t, "${bundle.name}/bar", b.Config.Workspace.RootPath) + require.Equal(t, "${workspace.root_path}/baz", b.Config.Workspace.FilePath) + + // Apply with a valid prefix. This should change the workspace root path. 
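Editor's note: a rough, standalone sketch (not part of this patch) of the ${var.foo} shorthand rewrite implemented in resolve_variable_references.go above; rewriteVarShorthand and exampleRewrite are made-up names used only for illustration.

package mutator

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// rewriteVarShorthand mirrors the rewrite done inside Apply:
// ${var.foo...} is treated as ${variables.foo.value...}.
func rewriteVarShorthand(path dyn.Path) dyn.Path {
	varPath := dyn.NewPath(dyn.Key("var"))
	if !path.HasPrefix(varPath) {
		return path
	}

	newPath := dyn.NewPath(dyn.Key("variables"), path[1], dyn.Key("value"))
	if len(path) > 2 {
		newPath = newPath.Append(path[2:]...)
	}

	return newPath
}

func exampleRewrite() {
	p := rewriteVarShorthand(dyn.MustPathFromString("var.cluster.node_type_id"))
	fmt.Println(p.String()) // variables.cluster.value.node_type_id
}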
+ diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "workspace")) + require.NoError(t, diags.Error()) + require.Equal(t, "example/bar", b.Config.Workspace.RootPath) + require.Equal(t, "example/bar/baz", b.Config.Workspace.FilePath) +} + +func TestResolveVariableReferencesToBundleVariables(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Workspace: config.Workspace{ + RootPath: "${bundle.name}/${var.foo}", + }, + Variables: map[string]*variable.Variable{ + "foo": { + Value: "bar", + }, + }, + }, + } + + // Apply with a valid prefix. This should change the workspace root path. + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "example/bar", b.Config.Workspace.RootPath) +} + +func TestResolveVariableReferencesToEmptyFields(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + Git: config.Git{ + Branch: "", + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "git_branch": "${bundle.git.branch}", + }, + }, + }, + }, + }, + }, + } + + // Apply for the bundle prefix. + diags := bundle.Apply(context.Background(), b, ResolveVariableReferences("bundle")) + require.NoError(t, diags.Error()) + + // The job settings should have been interpolated to an empty string. + require.Equal(t, "", b.Config.Resources.Jobs["job1"].JobSettings.Tags["git_branch"]) +} + +func TestResolveVariableReferencesForPrimitiveNonStringFields(t *testing.T) { + var diags diag.Diagnostics + + b := &bundle.Bundle{ + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "no_alert_for_canceled_runs": {}, + "no_alert_for_skipped_runs": {}, + "min_workers": {}, + "max_workers": {}, + "spot_bid_max_price": {}, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + NotificationSettings: &jobs.JobNotificationSettings{ + NoAlertForCanceledRuns: false, + NoAlertForSkippedRuns: false, + }, + Tasks: []jobs.Task{ + { + NewCluster: &compute.ClusterSpec{ + Autoscale: &compute.AutoScale{ + MinWorkers: 0, + MaxWorkers: 0, + }, + AzureAttributes: &compute.AzureAttributes{ + SpotBidMaxPrice: 0.0, + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Initialize the variables. + diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.InitializeVariables([]string{ + "no_alert_for_canceled_runs=true", + "no_alert_for_skipped_runs=true", + "min_workers=1", + "max_workers=2", + "spot_bid_max_price=0.5", + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + // Assign the variables to the dynamic configuration. + diags = bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + // Set the notification settings. 
+ p = dyn.MustPathFromString("resources.jobs.job1.notification_settings") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_canceled_runs")), dyn.V("${var.no_alert_for_canceled_runs}")) + require.NoError(t, err) + v, err = dyn.SetByPath(v, p.Append(dyn.Key("no_alert_for_skipped_runs")), dyn.V("${var.no_alert_for_skipped_runs}")) + require.NoError(t, err) + + // Set the min and max workers. + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.autoscale") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("min_workers")), dyn.V("${var.min_workers}")) + require.NoError(t, err) + v, err = dyn.SetByPath(v, p.Append(dyn.Key("max_workers")), dyn.V("${var.max_workers}")) + require.NoError(t, err) + + // Set the spot bid max price. + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0].new_cluster.azure_attributes") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("spot_bid_max_price")), dyn.V("${var.spot_bid_max_price}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + // Apply for the variable prefix. This should resolve the variables to their values. + diags = bundle.Apply(context.Background(), b, ResolveVariableReferences("variables")) + require.NoError(t, diags.Error()) + assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForCanceledRuns) + assert.Equal(t, true, b.Config.Resources.Jobs["job1"].JobSettings.NotificationSettings.NoAlertForSkippedRuns) + assert.Equal(t, 1, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MinWorkers) + assert.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.Autoscale.MaxWorkers) + assert.Equal(t, 0.5, b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].NewCluster.AzureAttributes.SpotBidMaxPrice) +} + +func TestResolveComplexVariable(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. 
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 2, b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NumWorkers) +} + +func TestResolveComplexVariableReferencesToFields(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0].new_cluster") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("node_type_id")), dyn.V("${var.cluster.node_type_id}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, ResolveVariableReferences("bundle", "workspace", "variables")) + require.NoError(t, diags.Error()) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["job1"].JobSettings.JobClusters[0].NewCluster.NodeTypeId) +} + +func TestResolveComplexVariableReferencesWithComplexVariablesError(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "cluster": { + Value: map[string]any{ + "node_type_id": "Standard_DS3_v2", + "num_workers": 2, + "spark_conf": "${var.spark_conf}", + }, + Type: variable.VariableTypeComplex, + }, + "spark_conf": { + Value: map[string]any{ + "spark.executor.memory": "4g", + "spark.executor.cores": "2", + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + JobClusters: []jobs.JobCluster{ + { + NewCluster: compute.ClusterSpec{ + NodeTypeId: "random", + }, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. 
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.job_clusters[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("new_cluster")), dyn.V("${var.cluster}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq(ResolveVariableReferencesInComplexVariables(), ResolveVariableReferences("bundle", "workspace", "variables"))) + require.ErrorContains(t, diags.Error(), "complex variables cannot contain references to another complex variables") +} + +func TestResolveComplexVariableWithVarReference(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Variables: map[string]*variable.Variable{ + "package_version": { + Value: "1.0.0", + }, + "cluster_libraries": { + Value: [](map[string]any){ + { + "pypi": map[string]string{ + "package": "cicd_template==${var.package_version}", + }, + }, + }, + Type: variable.VariableTypeComplex, + }, + }, + + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{}, + }, + }, + }, + }, + }, + }, + }, + } + + ctx := context.Background() + + // Assign the variables to the dynamic configuration. + diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var p dyn.Path + var err error + + p = dyn.MustPathFromString("resources.jobs.job1.tasks[0]") + v, err = dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}")) + require.NoError(t, err) + + return v, nil + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, bundle.Seq( + ResolveVariableReferencesInComplexVariables(), + ResolveVariableReferences("bundle", "workspace", "variables"), + )) + require.NoError(t, diags.Error()) + require.Equal(t, "cicd_template==1.0.0", b.Config.Resources.Jobs["job1"].JobSettings.Tasks[0].Libraries[0].Pypi.Package) +} diff --git a/bundle/config/mutator/rewrite_sync_paths.go b/bundle/config/mutator/rewrite_sync_paths.go new file mode 100644 index 000000000..cfdc55f36 --- /dev/null +++ b/bundle/config/mutator/rewrite_sync_paths.go @@ -0,0 +1,61 @@ +package mutator + +import ( + "context" + "path/filepath" + + "github.com/databricks/cli/bundle" + + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type rewriteSyncPaths struct{} + +func RewriteSyncPaths() bundle.Mutator { + return &rewriteSyncPaths{} +} + +func (m *rewriteSyncPaths) Name() string { + return "RewriteSyncPaths" +} + +// makeRelativeTo returns a dyn.MapFunc that joins the relative path +// of the file it was defined in w.r.t. the bundle root path, with +// the contents of the string node. +// +// For example: +// - The bundle root is /foo +// - The configuration file that defines the string node is at /foo/bar/baz.yml +// - The string node contains "somefile.*" +// +// Then the resulting value will be "bar/somefile.*". 
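Editor's note: the path arithmetic described in the docstring above boils down to filepath.Rel followed by filepath.Join; here is a small standalone sketch (not part of this patch, assuming Unix-style paths) that reproduces the documented example.

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	root := "/foo"                  // bundle root
	definedIn := "/foo/bar/baz.yml" // config file that defines the string node
	value := "somefile.*"           // contents of the string node

	rel, err := filepath.Rel(root, filepath.Dir(definedIn))
	if err != nil {
		panic(err)
	}

	// Prints "bar/somefile.*" (OS-specific separators), matching the docstring example.
	fmt.Println(filepath.Join(rel, value))
}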
+func (m *rewriteSyncPaths) makeRelativeTo(root string) dyn.MapFunc { + return func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + dir := filepath.Dir(v.Location().File) + rel, err := filepath.Rel(root, dir) + if err != nil { + return dyn.InvalidValue, err + } + + return dyn.NewValue(filepath.Join(rel, v.MustString()), v.Locations()), nil + } +} + +func (m *rewriteSyncPaths) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "sync", func(_ dyn.Path, v dyn.Value) (nv dyn.Value, err error) { + v, err = dyn.Map(v, "include", dyn.Foreach(m.makeRelativeTo(b.RootPath))) + if err != nil { + return dyn.InvalidValue, err + } + v, err = dyn.Map(v, "exclude", dyn.Foreach(m.makeRelativeTo(b.RootPath))) + if err != nil { + return dyn.InvalidValue, err + } + return v, nil + }) + }) + + return diag.FromErr(err) +} diff --git a/bundle/config/mutator/rewrite_sync_paths_test.go b/bundle/config/mutator/rewrite_sync_paths_test.go new file mode 100644 index 000000000..56ada19e6 --- /dev/null +++ b/bundle/config/mutator/rewrite_sync_paths_test.go @@ -0,0 +1,101 @@ +package mutator_test + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/stretchr/testify/assert" +) + +func TestRewriteSyncPathsRelative(t *testing.T) { + b := &bundle.Bundle{ + RootPath: ".", + Config: config.Root{ + Sync: config.Sync{ + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + bundletest.SetLocation(b, "sync.include[0]", "./file.yml") + bundletest.SetLocation(b, "sync.include[1]", "./a/file.yml") + bundletest.SetLocation(b, "sync.exclude[0]", "./a/b/file.yml") + bundletest.SetLocation(b, "sync.exclude[1]", "./a/b/c/file.yml") + + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) + + assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) + assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) + assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) + assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1]) +} + +func TestRewriteSyncPathsAbsolute(t *testing.T) { + b := &bundle.Bundle{ + RootPath: "/tmp/dir", + Config: config.Root{ + Sync: config.Sync{ + Include: []string{ + "foo", + "bar", + }, + Exclude: []string{ + "baz", + "qux", + }, + }, + }, + } + + bundletest.SetLocation(b, "sync.include[0]", "/tmp/dir/file.yml") + bundletest.SetLocation(b, "sync.include[1]", "/tmp/dir/a/file.yml") + bundletest.SetLocation(b, "sync.exclude[0]", "/tmp/dir/a/b/file.yml") + bundletest.SetLocation(b, "sync.exclude[1]", "/tmp/dir/a/b/c/file.yml") + + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) + + assert.Equal(t, filepath.Clean("foo"), b.Config.Sync.Include[0]) + assert.Equal(t, filepath.Clean("a/bar"), b.Config.Sync.Include[1]) + assert.Equal(t, filepath.Clean("a/b/baz"), b.Config.Sync.Exclude[0]) + assert.Equal(t, filepath.Clean("a/b/c/qux"), b.Config.Sync.Exclude[1]) +} + +func TestRewriteSyncPathsErrorPaths(t *testing.T) { + t.Run("no sync block", func(t *testing.T) { + b := &bundle.Bundle{ + RootPath: ".", + } + + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) + 
}) + + t.Run("empty include/exclude blocks", func(t *testing.T) { + b := &bundle.Bundle{ + RootPath: ".", + Config: config.Root{ + Sync: config.Sync{ + Include: []string{}, + Exclude: []string{}, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, mutator.RewriteSyncPaths()) + assert.NoError(t, diags.Error()) + }) +} diff --git a/bundle/config/mutator/run_as.go b/bundle/config/mutator/run_as.go index 7d1a49175..d344a988a 100644 --- a/bundle/config/mutator/run_as.go +++ b/bundle/config/mutator/run_as.go @@ -2,19 +2,26 @@ package mutator import ( "context" + "fmt" "slices" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/databricks-sdk-go/service/jobs" ) type setRunAs struct { } -// SetRunAs mutator is used to go over defined resources such as Jobs and DLT Pipelines -// And set correct execution identity ("run_as" for a job or "is_owner" permission for DLT) -// if top-level "run-as" section is defined in the configuration. +// This mutator does two things: +// +// 1. Sets the run_as field for jobs to the value of the run_as field in the bundle. +// +// 2. Validates that the bundle run_as configuration is valid in the context of the bundle. +// If the run_as user is different from the current deployment user, DABs only +// supports a subset of resources. func SetRunAs() bundle.Mutator { return &setRunAs{} } @@ -23,10 +30,99 @@ func (m *setRunAs) Name() string { return "SetRunAs" } -func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error { +type errUnsupportedResourceTypeForRunAs struct { + resourceType string + resourceLocation dyn.Location + currentUser string + runAsUser string +} + +func (e errUnsupportedResourceTypeForRunAs) Error() string { + return fmt.Sprintf("%s are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s. Current identity: %s. Run as identity: %s", e.resourceType, e.resourceLocation, e.currentUser, e.runAsUser) +} + +type errBothSpAndUserSpecified struct { + spName string + spLoc dyn.Location + userName string + userLoc dyn.Location +} + +func (e errBothSpAndUserSpecified) Error() string { + return fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name %q is specified at %s. A user_name %q is defined at %s", e.spName, e.spLoc, e.userName, e.userLoc) +} + +func validateRunAs(b *bundle.Bundle) error { + neitherSpecifiedErr := fmt.Errorf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s", b.Config.GetLocation("run_as")) + // Error if neither service_principal_name nor user_name are specified, but the + // run_as section is present. + if b.Config.Value().Get("run_as").Kind() == dyn.KindNil { + return neitherSpecifiedErr + } + // Error if one or both of service_principal_name and user_name are specified, + // but with empty values. 
+ if b.Config.RunAs.ServicePrincipalName == "" && b.Config.RunAs.UserName == "" { + return neitherSpecifiedErr + } + + // Error if both service_principal_name and user_name are specified + runAs := b.Config.RunAs + if runAs.UserName != "" && runAs.ServicePrincipalName != "" { + return errBothSpAndUserSpecified{ + spName: runAs.ServicePrincipalName, + userName: runAs.UserName, + spLoc: b.Config.GetLocation("run_as.service_principal_name"), + userLoc: b.Config.GetLocation("run_as.user_name"), + } + } + + identity := runAs.ServicePrincipalName + if identity == "" { + identity = runAs.UserName + } + + // All resources are supported if the run_as identity is the same as the current deployment identity. + if identity == b.Config.Workspace.CurrentUser.UserName { + return nil + } + + // DLT pipelines do not support run_as in the API. + if len(b.Config.Resources.Pipelines) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "pipelines", + resourceLocation: b.Config.GetLocation("resources.pipelines"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + + // Model serving endpoints do not support run_as in the API. + if len(b.Config.Resources.ModelServingEndpoints) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "model_serving_endpoints", + resourceLocation: b.Config.GetLocation("resources.model_serving_endpoints"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + + // Monitors do not support run_as in the API. + if len(b.Config.Resources.QualityMonitors) > 0 { + return errUnsupportedResourceTypeForRunAs{ + resourceType: "quality_monitors", + resourceLocation: b.Config.GetLocation("resources.quality_monitors"), + currentUser: b.Config.Workspace.CurrentUser.UserName, + runAsUser: identity, + } + } + + return nil +} + +func setRunAsForJobs(b *bundle.Bundle) { runAs := b.Config.RunAs if runAs == nil { - return nil + return } for i := range b.Config.Resources.Jobs { @@ -39,13 +135,22 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error { UserName: runAs.UserName, } } +} + +// Legacy behavior of run_as for DLT pipelines. Available under the experimental.use_run_as_legacy flag. +// Only available to unblock customers stuck due to breaking changes in https://github.com/databricks/cli/pull/1233 +func setPipelineOwnersToRunAsIdentity(b *bundle.Bundle) { + runAs := b.Config.RunAs + if runAs == nil { + return + } me := b.Config.Workspace.CurrentUser.UserName // If user deploying the bundle and the one defined in run_as are the same // Do not add IS_OWNER permission. Current user is implied to be an owner in this case. // Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407 if runAs.UserName == me || runAs.ServicePrincipalName == me { - return nil + return } for i := range b.Config.Resources.Pipelines { @@ -60,6 +165,32 @@ func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error { UserName: runAs.UserName, }) } +} +func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + // Mutator is a no-op if run_as is not specified in the bundle + if b.Config.Value().Get("run_as").Kind() == dyn.KindInvalid { + return nil + } + + if b.Config.Experimental != nil && b.Config.Experimental.UseLegacyRunAs { + setPipelineOwnersToRunAsIdentity(b) + setRunAsForJobs(b) + return diag.Diagnostics{ + { + Severity: diag.Warning, + Summary: "You are using the legacy mode of run_as. 
The support for this mode is experimental and might be removed in a future release of the CLI. In order to run the DLT pipelines in your DAB as the run_as user this mode changes the owners of the pipelines to the run_as identity, which requires the user deploying the bundle to be a workspace admin, and also a Metastore admin if the pipeline target is in UC.", + Path: dyn.MustPathFromString("experimental.use_legacy_run_as"), + Location: b.Config.GetLocation("experimental.use_legacy_run_as"), + }, + } + } + + // Assert the run_as configuration is valid in the context of the bundle + if err := validateRunAs(b); err != nil { + return diag.FromErr(err) + } + + setRunAsForJobs(b) return nil } diff --git a/bundle/config/mutator/run_as_test.go b/bundle/config/mutator/run_as_test.go new file mode 100644 index 000000000..67bf7bcc2 --- /dev/null +++ b/bundle/config/mutator/run_as_test.go @@ -0,0 +1,194 @@ +package mutator + +import ( + "context" + "slices" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func allResourceTypes(t *testing.T) []string { + // Compute supported resource types based on the `Resources{}` struct. + r := &config.Resources{} + rv, err := convert.FromTyped(r, dyn.NilValue) + require.NoError(t, err) + normalized, _ := convert.Normalize(r, rv, convert.IncludeMissingFields) + resourceTypes := []string{} + for _, k := range normalized.MustMap().Keys() { + resourceTypes = append(resourceTypes, k.MustString()) + } + slices.Sort(resourceTypes) + + // Assert the total list of resource supported, as a sanity check that using + // the dyn library gives us the correct list of all resources supported. Please + // also update this check when adding a new resource + require.Equal(t, []string{ + "experiments", + "jobs", + "model_serving_endpoints", + "models", + "pipelines", + "quality_monitors", + "registered_models", + }, + resourceTypes, + ) + + return resourceTypes +} + +func TestRunAsWorksForAllowedResources(t *testing.T) { + config := config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "alice", + }, + }, + }, + RunAs: &jobs.JobRunAs{ + UserName: "bob", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job_one": { + JobSettings: &jobs.JobSettings{ + Name: "foo", + }, + }, + "job_two": { + JobSettings: &jobs.JobSettings{ + Name: "bar", + }, + }, + "job_three": { + JobSettings: &jobs.JobSettings{ + Name: "baz", + }, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model_one": {}, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registered_model_one": {}, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment_one": {}, + }, + }, + } + + b := &bundle.Bundle{ + Config: config, + } + + diags := bundle.Apply(context.Background(), b, SetRunAs()) + assert.NoError(t, diags.Error()) + + for _, job := range b.Config.Resources.Jobs { + assert.Equal(t, "bob", job.RunAs.UserName) + } +} + +func TestRunAsErrorForUnsupportedResources(t *testing.T) { + // Bundle "run_as" has two modes of operation, each with a different set of + // resources that are supported. + // Cases: + // 1. 
When the bundle "run_as" identity is the same as the current deployment
+	//    identity. In this case all resources are supported.
+	// 2. When the bundle "run_as" identity is different from the current
+	//    deployment identity. In this case only a subset of resources is
+	//    supported. This subset is defined in the allow list below.
+	//
+	// To be a part of the allow list, the resource must satisfy one of the following
+	// two conditions:
+	//   1. The resource supports setting a run_as identity to a different user
+	//      from the owner/creator of the resource. For example, jobs.
+	//   2. Run-as semantics do not apply to the resource. We do not plan to add
+	//      platform side support for `run_as` for these resources. For example,
+	//      experiments or registered models.
+	//
+	// Any resource that is not on the allow list cannot be used when the bundle
+	// run_as is different from the current deployment user. "bundle validate" must
+	// return an error if such a resource has been defined, and the run_as identity
+	// is different from the current deployment identity.
+	//
+	// Action Item: If you are adding a new resource to DABs, please check in with
+	// the relevant owning team whether the resource should be on the allow list or
+	// (implicitly) on the deny list. Any resources that could have run_as semantics
+	// in the future should be on the deny list.
+	// For example, the teams owning pipelines, model serving endpoints, and Lakeview
+	// dashboards plan to add platform side support for `run_as` for these resources
+	// at some point in the future. These resources are therefore (implicitly) on the
+	// deny list, since they are not on the allow list below.
+	allowList := []string{
+		"jobs",
+		"models",
+		"registered_models",
+		"experiments",
+	}
+
+	base := config.Root{
+		Workspace: config.Workspace{
+			CurrentUser: &config.User{
+				User: &iam.User{
+					UserName: "alice",
+				},
+			},
+		},
+		RunAs: &jobs.JobRunAs{
+			UserName: "bob",
+		},
+	}
+
+	v, err := convert.FromTyped(base, dyn.NilValue)
+	require.NoError(t, err)
+
+	// Define the top-level resources key in the bundle configuration.
+	// This is not part of the typed configuration, so we need to add it manually.
+	v, err = dyn.Set(v, "resources", dyn.V(map[string]dyn.Value{}))
+	require.NoError(t, err)
+
+	for _, rt := range allResourceTypes(t) {
+		// Skip allowed resources
+		if slices.Contains(allowList, rt) {
+			continue
+		}
+
+		// Add an instance of the resource type that is not on the allow list to
+		// the bundle configuration.
+		nv, err := dyn.SetByPath(v, dyn.NewPath(dyn.Key("resources"), dyn.Key(rt)), dyn.V(map[string]dyn.Value{
+			"foo": dyn.V(map[string]dyn.Value{
+				"path": dyn.V("bar"),
+			}),
+		}))
+		require.NoError(t, err)
+
+		// Get back typed configuration from the newly created invalid bundle configuration.
+		r := &config.Root{}
+		err = convert.ToTyped(r, nv)
+		require.NoError(t, err)
+
+		// Assert this invalid bundle configuration fails validation.
+		b := &bundle.Bundle{
+			Config: *r,
+		}
+		diags := bundle.Apply(context.Background(), b, SetRunAs())
+		assert.Equal(t, diags.Error().Error(), errUnsupportedResourceTypeForRunAs{
+			resourceType:     rt,
+			resourceLocation: dyn.Location{},
+			currentUser:      "alice",
+			runAsUser:        "bob",
+		}.Error(), "expected run_as with a different identity than the current deployment user to not be supported for resources of type: %s", rt)
+	}
+}
diff --git a/bundle/config/mutator/select_default_target.go b/bundle/config/mutator/select_default_target.go
index be5046f82..4ac0aae6f 100644
--- a/bundle/config/mutator/select_default_target.go
+++ b/bundle/config/mutator/select_default_target.go
@@ -2,10 +2,10 @@ package mutator
 import (
 	"context"
-	"fmt"
 	"strings"
 	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
 	"golang.org/x/exp/maps"
 )
@@ -20,9 +20,9 @@ func (m *selectDefaultTarget) Name() string {
 	return "SelectDefaultTarget"
 }
-func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error {
+func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	if len(b.Config.Targets) == 0 {
-		return fmt.Errorf("no targets defined")
+		return diag.Errorf("no targets defined")
 	}
 	// One target means there's only one default.
@@ -41,12 +41,12 @@ func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error
 	// It is invalid to have multiple targets with the `default` flag set.
 	if len(defaults) > 1 {
-		return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
+		return diag.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
 	}
 	// If no target has the `default` flag set, ask the user to specify one.
 	if len(defaults) == 0 {
-		return fmt.Errorf("please specify target")
+		return diag.Errorf("please specify target")
 	}
 	// One default remaining.
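Editor's note: the run_as changes earlier in this patch (run_as.go and run_as_test.go) reference three error values — neitherSpecifiedErr, errBothSpAndUserSpecified, and errUnsupportedResourceTypeForRunAs — whose declarations are not included in the hunks shown here. The following is a minimal, hypothetical sketch of what those declarations could look like, reconstructed only from the fields used above; the actual definitions and message wording in this PR may differ.

// Hypothetical sketch, not part of this diff: error values referenced by
// validateRunAs, reconstructed from the fields used in run_as.go and
// run_as_test.go. The real declarations live elsewhere in this PR.
package mutator

import (
	"errors"
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

// Returned when a run_as section is present but names no identity.
var neitherSpecifiedErr = errors.New("run_as section must specify exactly one identity: either service_principal_name or user_name")

// Returned when both service_principal_name and user_name are set.
type errBothSpAndUserSpecified struct {
	spName   string
	spLoc    dyn.Location
	userName string
	userLoc  dyn.Location
}

func (e errBothSpAndUserSpecified) Error() string {
	return fmt.Sprintf(
		"run_as section cannot specify both user_name and service_principal_name. Found service_principal_name %q (%v) and user_name %q (%v)",
		e.spName, e.spLoc, e.userName, e.userLoc,
	)
}

// Returned when a resource type cannot be deployed with a run_as identity
// that differs from the current deployment identity.
type errUnsupportedResourceTypeForRunAs struct {
	resourceType     string
	resourceLocation dyn.Location
	currentUser      string
	runAsUser        string
}

func (e errUnsupportedResourceTypeForRunAs) Error() string {
	return fmt.Sprintf(
		"resources of type %s (%v) do not support a run_as identity (%s) different from the current deployment identity (%s)",
		e.resourceType, e.resourceLocation, e.runAsUser, e.currentUser,
	)
}

With declarations along these lines in place, the assertion in TestRunAsErrorForUnsupportedResources simply compares the diagnostic message produced by SetRunAs against the same Error() output for the offending resource type.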
diff --git a/bundle/config/mutator/select_default_target_test.go b/bundle/config/mutator/select_default_target_test.go index 1c2e451fe..dfea4ff67 100644 --- a/bundle/config/mutator/select_default_target_test.go +++ b/bundle/config/mutator/select_default_target_test.go @@ -16,8 +16,8 @@ func TestSelectDefaultTargetNoTargets(t *testing.T) { Targets: map[string]*config.Target{}, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "no targets defined") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "no targets defined") } func TestSelectDefaultTargetSingleTargets(t *testing.T) { @@ -28,8 +28,8 @@ func TestSelectDefaultTargetSingleTargets(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.NoError(t, diags.Error()) assert.Equal(t, "foo", b.Config.Bundle.Target) } @@ -43,8 +43,8 @@ func TestSelectDefaultTargetNoDefaults(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "please specify target") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "please specify target") } func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { @@ -56,8 +56,8 @@ func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "please specify target") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "please specify target") } func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { @@ -70,8 +70,8 @@ func TestSelectDefaultTargetMultipleDefaults(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.ErrorContains(t, err, "multiple targets are marked as default") + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.ErrorContains(t, diags.Error(), "multiple targets are marked as default") } func TestSelectDefaultTargetSingleDefault(t *testing.T) { @@ -84,7 +84,7 @@ func TestSelectDefaultTargetSingleDefault(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectDefaultTarget()) + assert.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Bundle.Target) } diff --git a/bundle/config/mutator/select_target.go b/bundle/config/mutator/select_target.go index 2ad431128..178686b6e 100644 --- a/bundle/config/mutator/select_target.go +++ b/bundle/config/mutator/select_target.go @@ -6,6 +6,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "golang.org/x/exp/maps" ) @@ -24,21 +25,21 @@ func (m *selectTarget) Name() string { return fmt.Sprintf("SelectTarget(%s)", m.name) } -func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Targets == nil { - return fmt.Errorf("no targets defined") + return diag.Errorf("no targets defined") } // Get specified target - target, ok := b.Config.Targets[m.name] + _, ok := 
b.Config.Targets[m.name] if !ok { - return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) + return diag.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", ")) } // Merge specified target into root configuration structure. - err := b.Config.MergeTargetOverrides(target) + err := b.Config.MergeTargetOverrides(m.name) if err != nil { - return err + return diag.FromErr(err) } // Store specified target in configuration for reference. diff --git a/bundle/config/mutator/select_target_test.go b/bundle/config/mutator/select_target_test.go index 20467270b..a7c5ac93c 100644 --- a/bundle/config/mutator/select_target_test.go +++ b/bundle/config/mutator/select_target_test.go @@ -26,8 +26,8 @@ func TestSelectTarget(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectTarget("default")) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("default")) + require.NoError(t, diags.Error()) assert.Equal(t, "bar", b.Config.Workspace.Host) } @@ -39,6 +39,6 @@ func TestSelectTargetNotFound(t *testing.T) { }, }, } - err := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist")) - require.Error(t, err, "no targets defined") + diags := bundle.Apply(context.Background(), b, mutator.SelectTarget("doesnt-exist")) + require.Error(t, diags.Error(), "no targets defined") } diff --git a/bundle/config/mutator/set_variables.go b/bundle/config/mutator/set_variables.go index 3b9ac8ae7..47ce2ad03 100644 --- a/bundle/config/mutator/set_variables.go +++ b/bundle/config/mutator/set_variables.go @@ -6,6 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/env" ) @@ -21,49 +23,63 @@ func (m *setVariables) Name() string { return "SetVariables" } -func setVariable(ctx context.Context, v *variable.Variable, name string) error { +func setVariable(ctx context.Context, v dyn.Value, variable *variable.Variable, name string) (dyn.Value, error) { // case: variable already has value initialized, so skip - if v.HasValue() { - return nil + if variable.HasValue() { + return v, nil } // case: read and set variable value from process environment envVarName := bundleVarPrefix + name if val, ok := env.Lookup(ctx, envVarName); ok { - err := v.Set(val) - if err != nil { - return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err) + if variable.IsComplex() { + return dyn.InvalidValue, fmt.Errorf(`setting via environment variables (%s) is not supported for complex variable %s`, envVarName, name) } - return nil - } - // case: Set the variable to its default value - if v.HasDefault() { - err := v.Set(*v.Default) + v, err := dyn.Set(v, "value", dyn.V(val)) if err != nil { - return fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %w`, *v.Default, name, err) + return dyn.InvalidValue, fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %v`, val, name, envVarName, err) } - return nil + return v, nil } // case: Defined a variable for named lookup for a resource // It will be resolved later in ResolveResourceReferences mutator - if v.Lookup != nil { - return nil + if variable.Lookup != nil { + return v, nil + } + + // case: Set the 
variable to its default value + if variable.HasDefault() { + vDefault, err := dyn.Get(v, "default") + if err != nil { + return dyn.InvalidValue, fmt.Errorf(`failed to get default value from config "%s" for variable %s with error: %v`, variable.Default, name, err) + } + + v, err := dyn.Set(v, "value", vDefault) + if err != nil { + return dyn.InvalidValue, fmt.Errorf(`failed to assign default value from config "%s" to variable %s with error: %v`, variable.Default, name, err) + } + return v, nil } // We should have had a value to set for the variable at this point. - // TODO: use cmdio to request values for unassigned variables if current - // terminal is a tty. Tracked in https://github.com/databricks/cli/issues/379 - return fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) + return dyn.InvalidValue, fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name) + } -func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error { - for name, variable := range b.Config.Variables { - err := setVariable(ctx, variable, name) - if err != nil { - return err - } - } - return nil +func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + return dyn.Map(v, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + name := p[1].Key() + v, ok := b.Config.Variables[name] + if !ok { + return dyn.InvalidValue, fmt.Errorf(`variable "%s" is not defined`, name) + } + + return setVariable(ctx, variable, v, name) + })) + }) + + return diag.FromErr(err) } diff --git a/bundle/config/mutator/set_variables_test.go b/bundle/config/mutator/set_variables_test.go index 15a98e5cf..d9719793f 100644 --- a/bundle/config/mutator/set_variables_test.go +++ b/bundle/config/mutator/set_variables_test.go @@ -7,6 +7,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -15,27 +17,38 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } // set value for variable as an environment variable t.Setenv("BUNDLE_VAR_foo", "process-env") - - err := setVariable(context.Background(), &variable, "foo") + v, err := convert.FromTyped(variable, dyn.NilValue) require.NoError(t, err) - assert.Equal(t, *variable.Value, "process-env") + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) + assert.Equal(t, variable.Value, "process-env") } func TestSetVariableUsingDefaultValue(t *testing.T) { defaultVal := "default" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, + Default: defaultVal, } - err := setVariable(context.Background(), &variable, "foo") + v, err := convert.FromTyped(variable, dyn.NilValue) require.NoError(t, err) - assert.Equal(t, *variable.Value, "default") + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, 
err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) + assert.Equal(t, variable.Value, "default") } func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { @@ -43,15 +56,21 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // since a value is already assigned to the variable, it would not be overridden // by the default value - err := setVariable(context.Background(), &variable, "foo") + v, err := convert.FromTyped(variable, dyn.NilValue) require.NoError(t, err) - assert.Equal(t, *variable.Value, "assigned-value") + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { @@ -59,8 +78,8 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { val := "assigned-value" variable := variable.Variable{ Description: "a test variable", - Default: &defaultVal, - Value: &val, + Default: defaultVal, + Value: val, } // set value for variable as an environment variable @@ -68,9 +87,15 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) { // since a value is already assigned to the variable, it would not be overridden // by the value from environment - err := setVariable(context.Background(), &variable, "foo") + v, err := convert.FromTyped(variable, dyn.NilValue) require.NoError(t, err) - assert.Equal(t, *variable.Value, "assigned-value") + + v, err = setVariable(context.Background(), v, &variable, "foo") + require.NoError(t, err) + + err = convert.ToTyped(&variable, v) + require.NoError(t, err) + assert.Equal(t, variable.Value, "assigned-value") } func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { @@ -79,7 +104,10 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) { } // fails because we could not resolve a value for the variable - err := setVariable(context.Background(), &variable, "foo") + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + _, err = setVariable(context.Background(), v, &variable, "foo") assert.ErrorContains(t, err, "no value assigned to required variable foo. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable") } @@ -92,15 +120,15 @@ func TestSetVariablesMutator(t *testing.T) { Variables: map[string]*variable.Variable{ "a": { Description: "resolved to default value", - Default: &defaultValForA, + Default: defaultValForA, }, "b": { Description: "resolved from environment vairables", - Default: &defaultValForB, + Default: defaultValForB, }, "c": { Description: "has already been assigned a value", - Value: &valForC, + Value: valForC, }, }, }, @@ -108,9 +136,27 @@ func TestSetVariablesMutator(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") - err := bundle.Apply(context.Background(), b, SetVariables()) - require.NoError(t, err) - assert.Equal(t, "default-a", *b.Config.Variables["a"].Value) - assert.Equal(t, "env-var-b", *b.Config.Variables["b"].Value) - assert.Equal(t, "assigned-val-c", *b.Config.Variables["c"].Value) + diags := bundle.Apply(context.Background(), b, SetVariables()) + require.NoError(t, diags.Error()) + assert.Equal(t, "default-a", b.Config.Variables["a"].Value) + assert.Equal(t, "env-var-b", b.Config.Variables["b"].Value) + assert.Equal(t, "assigned-val-c", b.Config.Variables["c"].Value) +} + +func TestSetComplexVariablesViaEnvVariablesIsNotAllowed(t *testing.T) { + defaultVal := "default" + variable := variable.Variable{ + Description: "a test variable", + Default: defaultVal, + Type: variable.VariableTypeComplex, + } + + // set value for variable as an environment variable + t.Setenv("BUNDLE_VAR_foo", "process-env") + + v, err := convert.FromTyped(variable, dyn.NilValue) + require.NoError(t, err) + + _, err = setVariable(context.Background(), v, &variable, "foo") + assert.ErrorContains(t, err, "setting via environment variables (BUNDLE_VAR_foo) is not supported for complex variable foo") } diff --git a/bundle/config/mutator/trampoline.go b/bundle/config/mutator/trampoline.go index 24600f52f..dde9a299e 100644 --- a/bundle/config/mutator/trampoline.go +++ b/bundle/config/mutator/trampoline.go @@ -9,6 +9,7 @@ import ( "text/template" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -40,12 +41,12 @@ func (m *trampoline) Name() string { return fmt.Sprintf("trampoline(%s)", m.name) } -func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tasks := m.functions.GetTasks(b) for _, task := range tasks { err := m.generateNotebookWrapper(ctx, b, task) if err != nil { - return err + return diag.FromErr(err) } } return nil @@ -81,7 +82,7 @@ func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bund return err } - internalDirRel, err := filepath.Rel(b.Config.Path, internalDir) + internalDirRel, err := filepath.Rel(b.RootPath, internalDir) if err != nil { return err } diff --git a/bundle/config/mutator/trampoline_test.go b/bundle/config/mutator/trampoline_test.go index a3e06b303..e39076647 100644 --- a/bundle/config/mutator/trampoline_test.go +++ b/bundle/config/mutator/trampoline_test.go @@ -57,8 +57,8 @@ func TestGenerateTrampoline(t *testing.T) { } b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -80,8 +80,8 @@ func TestGenerateTrampoline(t *testing.T) { funcs := functions{} trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}") - err := bundle.Apply(ctx, b, trampoline) - 
require.NoError(t, err) + diags := bundle.Apply(ctx, b, trampoline) + require.NoError(t, diags.Error()) dir, err := b.InternalDir(ctx) require.NoError(t, err) diff --git a/bundle/config/mutator/translate_paths.go b/bundle/config/mutator/translate_paths.go index b4a17afc7..28f7d3d30 100644 --- a/bundle/config/mutator/translate_paths.go +++ b/bundle/config/mutator/translate_paths.go @@ -4,13 +4,15 @@ import ( "context" "errors" "fmt" + "io/fs" "net/url" - "os" "path" "path/filepath" "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" "github.com/databricks/cli/libs/notebook" ) @@ -30,9 +32,7 @@ func (err ErrIsNotNotebook) Error() string { return fmt.Sprintf("file at %s is not a notebook", err.path) } -type translatePaths struct { - seen map[string]string -} +type translatePaths struct{} // TranslatePaths converts paths to local notebook files into paths in the workspace file system. func TranslatePaths() bundle.Mutator { @@ -45,6 +45,18 @@ func (m *translatePaths) Name() string { type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error) +// translateContext is a context for rewriting paths in a config. +// It is freshly instantiated on every mutator apply call. +// It provides access to the underlying bundle object such that +// it doesn't have to be passed around explicitly. +type translateContext struct { + b *bundle.Bundle + + // seen is a map of local paths to their corresponding remote paths. + // If a local path has already been successfully resolved, we do not need to resolve it again. + seen map[string]string +} + // rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function // // It takes these arguments: @@ -54,14 +66,13 @@ type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) ( // This logic is different between regular files or notebooks. // // The function returns an error if it is impossible to rewrite the given relative path. -func (m *translatePaths) rewritePath( +func (t *translateContext) rewritePath( dir string, - b *bundle.Bundle, p *string, fn rewriteFunc, ) error { // We assume absolute paths point to a location in the workspace - if path.IsAbs(filepath.ToSlash(*p)) { + if path.IsAbs(*p) { return nil } @@ -77,13 +88,14 @@ func (m *translatePaths) rewritePath( // Local path is relative to the directory the resource was defined in. localPath := filepath.Join(dir, filepath.FromSlash(*p)) - if interp, ok := m.seen[localPath]; ok { + if interp, ok := t.seen[localPath]; ok { *p = interp return nil } - // Remote path must be relative to the bundle root. - localRelPath, err := filepath.Rel(b.Config.Path, localPath) + // Local path must be contained in the bundle root. + // If it isn't, it won't be synchronized into the workspace. + localRelPath, err := filepath.Rel(t.b.RootPath, localPath) if err != nil { return err } @@ -92,22 +104,22 @@ func (m *translatePaths) rewritePath( } // Prefix remote path with its remote root path. - remotePath := path.Join(b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) + remotePath := path.Join(t.b.Config.Workspace.FilePath, filepath.ToSlash(localRelPath)) // Convert local path into workspace path via specified function. 
- interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath)) + interp, err := fn(*p, localPath, localRelPath, remotePath) if err != nil { return err } *p = interp - m.seen[localPath] = interp + t.seen[localPath] = interp return nil } -func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { +func (t *translateContext) translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("notebook %s not found", literal) } if err != nil { @@ -121,9 +133,9 @@ func translateNotebookPath(literal, localFullPath, localRelPath, remotePath stri return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil } -func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - nb, _, err := notebook.Detect(localFullPath) - if os.IsNotExist(err) { +func (t *translateContext) translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + nb, _, err := notebook.DetectWithFS(t.b.BundleRoot, filepath.ToSlash(localRelPath)) + if errors.Is(err, fs.ErrNotExist) { return "", fmt.Errorf("file %s not found", literal) } if err != nil { @@ -135,8 +147,8 @@ func translateFilePath(literal, localFullPath, localRelPath, remotePath string) return remotePath, nil } -func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { - info, err := os.Stat(localFullPath) +func (t *translateContext) translateDirectoryPath(literal, localFullPath, localRelPath, remotePath string) (string, error) { + info, err := t.b.BundleRoot.Stat(filepath.ToSlash(localRelPath)) if err != nil { return "", err } @@ -146,59 +158,99 @@ func translateDirectoryPath(literal, localFullPath, localRelPath, remotePath str return remotePath, nil } -func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { +func (t *translateContext) translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) { return localRelPath, nil } -type transformer struct { - // A directory path relative to which `path` will be transformed - dir string - // A path to transform - path *string - // Name of the config property where the path string is coming from - configPath string - // A function that performs the actual rewriting logic. - fn rewriteFunc +func (t *translateContext) translateNoOpWithPrefix(literal, localFullPath, localRelPath, remotePath string) (string, error) { + if !strings.HasPrefix(localRelPath, ".") { + localRelPath = "." 
+ string(filepath.Separator) + localRelPath + } + return localRelPath, nil } -type transformFunc func(resource any, dir string) *transformer - -// Apply all matches transformers for the given resource -func (m *translatePaths) applyTransformers(funcs []transformFunc, b *bundle.Bundle, resource any, dir string) error { - for _, transformFn := range funcs { - transformer := transformFn(resource, dir) - if transformer == nil { - continue +func (t *translateContext) rewriteValue(p dyn.Path, v dyn.Value, fn rewriteFunc, dir string) (dyn.Value, error) { + out := v.MustString() + err := t.rewritePath(dir, &out, fn) + if err != nil { + if target := (&ErrIsNotebook{}); errors.As(err, target) { + return dyn.InvalidValue, fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, p, target) } + if target := (&ErrIsNotNotebook{}); errors.As(err, target) { + return dyn.InvalidValue, fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, p, target) + } + return dyn.InvalidValue, err + } - err := m.rewritePath(transformer.dir, b, transformer.path, transformer.fn) - if err != nil { - if target := (&ErrIsNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, transformer.configPath, target) - } - if target := (&ErrIsNotNotebook{}); errors.As(err, target) { - return fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, transformer.configPath, target) - } - return err + return dyn.NewValue(out, v.Locations()), nil +} + +func (t *translateContext) rewriteRelativeTo(p dyn.Path, v dyn.Value, fn rewriteFunc, dir, fallback string) (dyn.Value, error) { + nv, err := t.rewriteValue(p, v, fn, dir) + if err == nil { + return nv, nil + } + + // If we failed to rewrite the path, try to rewrite it relative to the fallback directory. + if fallback != "" { + nv, nerr := t.rewriteValue(p, v, fn, fallback) + if nerr == nil { + // TODO: Emit a warning that this path should be rewritten. + return nv, nil } } - return nil + return dyn.InvalidValue, err } -func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error { - m.seen = make(map[string]string) - - for _, fn := range []func(*translatePaths, *bundle.Bundle) error{ - applyJobTransformers, - applyPipelineTransformers, - applyArtifactTransformers, - } { - err := fn(m, b) - if err != nil { - return err - } +func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + t := &translateContext{ + b: b, + seen: make(map[string]string), } - return nil + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + var err error + for _, fn := range []func(dyn.Value) (dyn.Value, error){ + t.applyJobTranslations, + t.applyPipelineTranslations, + t.applyArtifactTranslations, + } { + v, err = fn(v) + if err != nil { + return dyn.InvalidValue, err + } + } + return v, nil + }) + + return diag.FromErr(err) +} + +func gatherFallbackPaths(v dyn.Value, typ string) (map[string]string, error) { + var fallback = make(map[string]string) + var pattern = dyn.NewPattern(dyn.Key("resources"), dyn.Key(typ), dyn.AnyKey()) + + // Previous behavior was to use a resource's location as the base path to resolve + // relative paths in its definition. With the introduction of [dyn.Value] throughout, + // we can use the location of the [dyn.Value] of the relative path itself. + // + // This is more flexible, as resources may have overrides that are not + // located in the same directory as the resource configuration file. 
+ // + // To maintain backwards compatibility, we allow relative paths to be resolved using + // the original approach as fallback if the [dyn.Value] location cannot be resolved. + _, err := dyn.MapByPattern(v, pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for %s: %w", p, err) + } + fallback[key] = dir + return v, nil + }) + if err != nil { + return nil, err + } + return fallback, nil } diff --git a/bundle/config/mutator/translate_paths_artifacts.go b/bundle/config/mutator/translate_paths_artifacts.go index 91e8397cb..921c00c73 100644 --- a/bundle/config/mutator/translate_paths_artifacts.go +++ b/bundle/config/mutator/translate_paths_artifacts.go @@ -3,40 +3,47 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/dyn" ) -func transformArtifactPath(resource any, dir string) *transformer { - artifact, ok := resource.(*config.Artifact) - if !ok { - return nil - } +type artifactRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} - return &transformer{ - dir, - &artifact.Path, - "artifacts.path", - translateNoOp, +func (t *translateContext) artifactRewritePatterns() []artifactRewritePattern { + // Base pattern to match all artifacts. + base := dyn.NewPattern( + dyn.Key("artifacts"), + dyn.AnyKey(), + ) + + // Compile list of configuration paths to rewrite. + return []artifactRewritePattern{ + { + base.Append(dyn.Key("path")), + t.translateNoOp, + }, } } -func applyArtifactTransformers(m *translatePaths, b *bundle.Bundle) error { - artifactTransformers := []transformFunc{ - transformArtifactPath, - } +func (t *translateContext) applyArtifactTranslations(v dyn.Value) (dyn.Value, error) { + var err error - for key, artifact := range b.Config.Artifacts { - dir, err := artifact.ConfigFileDirectory() - if err != nil { - return fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) - } + for _, rewritePattern := range t.artifactRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[1].Key() + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for artifact %s: %w", key, err) + } - err = m.applyTransformers(artifactTransformers, b, artifact, dir) + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, "") + }) if err != nil { - return err + return dyn.InvalidValue, err } } - return nil + return v, nil } diff --git a/bundle/config/mutator/translate_paths_jobs.go b/bundle/config/mutator/translate_paths_jobs.go index d920c2209..60cc8bb9a 100644 --- a/bundle/config/mutator/translate_paths_jobs.go +++ b/bundle/config/mutator/translate_paths_jobs.go @@ -2,132 +2,134 @@ package mutator import ( "fmt" + "slices" - "github.com/databricks/cli/bundle" - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/dyn" ) -func transformNotebookTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.NotebookTask == nil { - return nil - } +type jobRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc + skipRewrite func(string) bool +} - return &transformer{ - dir, - 
&task.NotebookTask.NotebookPath, - "tasks.notebook_task.notebook_path", - translateNotebookPath, +func noSkipRewrite(string) bool { + return false +} + +func rewritePatterns(t *translateContext, base dyn.Pattern) []jobRewritePattern { + return []jobRewritePattern{ + { + base.Append(dyn.Key("notebook_task"), dyn.Key("notebook_path")), + t.translateNotebookPath, + noSkipRewrite, + }, + { + base.Append(dyn.Key("spark_python_task"), dyn.Key("python_file")), + t.translateFilePath, + noSkipRewrite, + }, + { + base.Append(dyn.Key("dbt_task"), dyn.Key("project_directory")), + t.translateDirectoryPath, + noSkipRewrite, + }, + { + base.Append(dyn.Key("sql_task"), dyn.Key("file"), dyn.Key("path")), + t.translateFilePath, + noSkipRewrite, + }, + { + base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("whl")), + t.translateNoOp, + noSkipRewrite, + }, + { + base.Append(dyn.Key("libraries"), dyn.AnyIndex(), dyn.Key("jar")), + t.translateNoOp, + noSkipRewrite, + }, } } -func transformSparkTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.SparkPythonTask == nil { - return nil +func (t *translateContext) jobRewritePatterns() []jobRewritePattern { + // Base pattern to match all tasks in all jobs. + base := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("tasks"), + dyn.AnyIndex(), + ) + + // Compile list of patterns and their respective rewrite functions. + jobEnvironmentsPatterns := []jobRewritePattern{ + { + dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("jobs"), + dyn.AnyKey(), + dyn.Key("environments"), + dyn.AnyIndex(), + dyn.Key("spec"), + dyn.Key("dependencies"), + dyn.AnyIndex(), + ), + t.translateNoOpWithPrefix, + func(s string) bool { + return !libraries.IsEnvironmentDependencyLocal(s) + }, + }, } - return &transformer{ - dir, - &task.SparkPythonTask.PythonFile, - "tasks.spark_python_task.python_file", - translateFilePath, - } + taskPatterns := rewritePatterns(t, base) + forEachPatterns := rewritePatterns(t, base.Append(dyn.Key("for_each_task"), dyn.Key("task"))) + allPatterns := append(taskPatterns, jobEnvironmentsPatterns...) + allPatterns = append(allPatterns, forEachPatterns...) 
+ return allPatterns } -func transformWhlLibrary(resource any, dir string) *transformer { - library, ok := resource.(*compute.Library) - if !ok || library.Whl == "" { - return nil +func (t *translateContext) applyJobTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "jobs") + if err != nil { + return dyn.InvalidValue, err } - return &transformer{ - dir, - &library.Whl, - "libraries.whl", - translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly - } -} - -func transformDbtTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.DbtTask == nil { - return nil - } - - return &transformer{ - dir, - &task.DbtTask.ProjectDirectory, - "tasks.dbt_task.project_directory", - translateDirectoryPath, - } -} - -func transformSqlFileTask(resource any, dir string) *transformer { - task, ok := resource.(*jobs.Task) - if !ok || task.SqlTask == nil || task.SqlTask.File == nil { - return nil - } - - return &transformer{ - dir, - &task.SqlTask.File.Path, - "tasks.sql_task.file.path", - translateFilePath, - } -} - -func transformJarLibrary(resource any, dir string) *transformer { - library, ok := resource.(*compute.Library) - if !ok || library.Jar == "" { - return nil - } - - return &transformer{ - dir, - &library.Jar, - "libraries.jar", - translateNoOp, // Does not convert to remote path but makes sure that nested paths resolved correctly - } -} - -func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error { - jobTransformers := []transformFunc{ - transformNotebookTask, - transformSparkTask, - transformWhlLibrary, - transformJarLibrary, - transformDbtTask, - transformSqlFileTask, - } - - for key, job := range b.Config.Resources.Jobs { - dir, err := job.ConfigFileDirectory() - if err != nil { - return fmt.Errorf("unable to determine directory for job %s: %w", key, err) - } - - // Do not translate job task paths if using git source + // Do not translate job task paths if using Git source + var ignore []string + for key, job := range t.b.Config.Resources.Jobs { if job.GitSource != nil { - continue - } - - for i := 0; i < len(job.Tasks); i++ { - task := &job.Tasks[i] - err := m.applyTransformers(jobTransformers, b, task, dir) - if err != nil { - return err - } - for j := 0; j < len(task.Libraries); j++ { - library := &task.Libraries[j] - err := m.applyTransformers(jobTransformers, b, library, dir) - if err != nil { - return err - } - } + ignore = append(ignore, key) } } - return nil + for _, rewritePattern := range t.jobRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + + // Skip path translation if the job is using git source. 
+ if slices.Contains(ignore, key) { + return v, nil + } + + dir, err := v.Location().Directory() + if err != nil { + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for job %s: %w", key, err) + } + + sv := v.MustString() + if rewritePattern.skipRewrite(sv) { + return v, nil + } + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) + }) + if err != nil { + return dyn.InvalidValue, err + } + } + + return v, nil } diff --git a/bundle/config/mutator/translate_paths_pipelines.go b/bundle/config/mutator/translate_paths_pipelines.go index 1afdb9d51..71a65e846 100644 --- a/bundle/config/mutator/translate_paths_pipelines.go +++ b/bundle/config/mutator/translate_paths_pipelines.go @@ -3,58 +3,59 @@ package mutator import ( "fmt" - "github.com/databricks/cli/bundle" - "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/databricks/cli/libs/dyn" ) -func transformLibraryNotebook(resource any, dir string) *transformer { - library, ok := resource.(*pipelines.PipelineLibrary) - if !ok || library.Notebook == nil { - return nil - } +type pipelineRewritePattern struct { + pattern dyn.Pattern + fn rewriteFunc +} - return &transformer{ - dir, - &library.Notebook.Path, - "libraries.notebook.path", - translateNotebookPath, +func (t *translateContext) pipelineRewritePatterns() []pipelineRewritePattern { + // Base pattern to match all libraries in all pipelines. + base := dyn.NewPattern( + dyn.Key("resources"), + dyn.Key("pipelines"), + dyn.AnyKey(), + dyn.Key("libraries"), + dyn.AnyIndex(), + ) + + // Compile list of configuration paths to rewrite. + return []pipelineRewritePattern{ + { + base.Append(dyn.Key("notebook"), dyn.Key("path")), + t.translateNotebookPath, + }, + { + base.Append(dyn.Key("file"), dyn.Key("path")), + t.translateFilePath, + }, } } -func transformLibraryFile(resource any, dir string) *transformer { - library, ok := resource.(*pipelines.PipelineLibrary) - if !ok || library.File == nil { - return nil +func (t *translateContext) applyPipelineTranslations(v dyn.Value) (dyn.Value, error) { + var err error + + fallback, err := gatherFallbackPaths(v, "pipelines") + if err != nil { + return dyn.InvalidValue, err } - return &transformer{ - dir, - &library.File.Path, - "libraries.file.path", - translateFilePath, - } -} - -func applyPipelineTransformers(m *translatePaths, b *bundle.Bundle) error { - pipelineTransformers := []transformFunc{ - transformLibraryNotebook, - transformLibraryFile, - } - - for key, pipeline := range b.Config.Resources.Pipelines { - dir, err := pipeline.ConfigFileDirectory() - if err != nil { - return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) - } - - for i := 0; i < len(pipeline.Libraries); i++ { - library := &pipeline.Libraries[i] - err := m.applyTransformers(pipelineTransformers, b, library, dir) + for _, rewritePattern := range t.pipelineRewritePatterns() { + v, err = dyn.MapByPattern(v, rewritePattern.pattern, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + key := p[2].Key() + dir, err := v.Location().Directory() if err != nil { - return err + return dyn.InvalidValue, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err) } + + return t.rewriteRelativeTo(p, v, rewritePattern.fn, dir, fallback[key]) + }) + if err != nil { + return dyn.InvalidValue, err } } - return nil + return v, nil } diff --git a/bundle/config/mutator/translate_paths_test.go b/bundle/config/mutator/translate_paths_test.go index 67f15d407..780a540df 100644 --- 
a/bundle/config/mutator/translate_paths_test.go +++ b/bundle/config/mutator/translate_paths_test.go @@ -4,13 +4,18 @@ import ( "context" "os" "path/filepath" + "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/bundle/internal/bundletest" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -36,18 +41,15 @@ func touchEmptyFile(t *testing.T, path string) { func TestTranslatePathsSkippedWithGitSource(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ GitSource: &jobs.GitSource{ GitBranch: "somebranch", @@ -80,8 +82,10 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) assert.Equal( t, @@ -108,17 +112,15 @@ func TestTranslatePaths(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -171,9 +173,6 @@ func TestTranslatePaths(t *testing.T) { }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -207,8 +206,10 @@ func TestTranslatePaths(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) // Assert that the path in the tasks now refer to the artifact. 
assert.Equal( @@ -279,17 +280,15 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "job", "my_dbt_project", "dbt_project.yml")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "job/resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -323,10 +322,6 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "pipeline/resource.yml"), - }, - PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -342,8 +337,11 @@ func TestTranslatePathsInSubdirectories(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - require.NoError(t, err) + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + bundletest.SetLocation(b, "resources.pipelines", filepath.Join(dir, "pipeline/resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) assert.Equal( t, @@ -377,17 +375,15 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "../resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -403,22 +399,22 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, "is not contained in bundle root") + bundletest.SetLocation(b, ".", filepath.Join(dir, "../resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), "is not contained in bundle root") } func TestJobNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -434,22 +430,22 @@ func TestJobNotebookDoesNotExistError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found") } func TestJobFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -465,22 +461,22 @@ func TestJobFileDoesNotExistError(t *testing.T) { }, } - err := 
bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "file ./doesnt_exist.py not found") + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found") } func TestPipelineNotebookDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -496,22 +492,22 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "notebook ./doesnt_exist.py not found") + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "notebook ./doesnt_exist.py not found") } func TestPipelineFileDoesNotExistError(t *testing.T) { dir := t.TempDir() b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "fake.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -527,8 +523,10 @@ func TestPipelineFileDoesNotExistError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.EqualError(t, err, "file ./doesnt_exist.py not found") + bundletest.SetLocation(b, ".", filepath.Join(dir, "fake.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.EqualError(t, diags.Error(), "file ./doesnt_exist.py not found") } func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { @@ -536,17 +534,15 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -562,8 +558,10 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "tasks.spark_python_task.python_file" but got a notebook`) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a file for "resources.jobs.job.tasks[0].spark_python_task.python_file" but got a notebook`) } func TestJobNotebookTaskWithFileSourceError(t *testing.T) { @@ -571,17 +569,15 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ 
FilePath: "/bundle", }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, JobSettings: &jobs.JobSettings{ Tasks: []jobs.Task{ { @@ -597,8 +593,10 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "tasks.notebook_task.notebook_path" but got a file`) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.jobs.job.tasks[0].notebook_task.notebook_path" but got a file`) } func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { @@ -606,17 +604,15 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { touchEmptyFile(t, filepath.Join(dir, "my_file.py")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -632,8 +628,10 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a notebook for "libraries.notebook.path" but got a file`) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a notebook for "resources.pipelines.pipeline.libraries[0].notebook.path" but got a file`) } func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { @@ -641,17 +639,15 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { touchNotebookFile(t, filepath.Join(dir, "my_notebook.py")) b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), Config: config.Root{ - Path: dir, Workspace: config.Workspace{ FilePath: "/bundle", }, Resources: config.Resources{ Pipelines: map[string]*resources.Pipeline{ "pipeline": { - Paths: paths.Paths{ - ConfigFilePath: filepath.Join(dir, "resource.yml"), - }, PipelineSpec: &pipelines.PipelineSpec{ Libraries: []pipelines.PipelineLibrary{ { @@ -667,6 +663,112 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) - assert.ErrorContains(t, err, `expected a file for "libraries.file.path" but got a notebook`) + bundletest.SetLocation(b, ".", filepath.Join(dir, "resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + assert.ErrorContains(t, diags.Error(), `expected a file for "resources.pipelines.pipeline.libraries[0].file.path" but got a notebook`) +} + +func TestTranslatePathJobEnvironments(t *testing.T) { + dir := t.TempDir() + touchEmptyFile(t, filepath.Join(dir, "env1.py")) + touchEmptyFile(t, filepath.Join(dir, "env2.py")) + + b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ 
+ { + Spec: &compute.Environment{ + Dependencies: []string{ + "./dist/env1.whl", + "../dist/env2.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + + diags := bundle.Apply(context.Background(), b, mutator.TranslatePaths()) + require.NoError(t, diags.Error()) + + assert.Equal(t, strings.Join([]string{".", "job", "dist", "env1.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[0]) + assert.Equal(t, strings.Join([]string{".", "dist", "env2.whl"}, string(os.PathSeparator)), b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[1]) + assert.Equal(t, "simplejson", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[2]) + assert.Equal(t, "/Workspace/Users/foo@bar.com/test.whl", b.Config.Resources.Jobs["job"].JobSettings.Environments[0].Spec.Dependencies[3]) +} + +func TestTranslatePathWithComplexVariables(t *testing.T) { + dir := t.TempDir() + b := &bundle.Bundle{ + RootPath: dir, + BundleRoot: vfs.MustNew(dir), + Config: config.Root{ + Variables: map[string]*variable.Variable{ + "cluster_libraries": { + Type: variable.VariableTypeComplex, + Default: [](map[string]string){ + { + "whl": "./local/whl.whl", + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + TaskKey: "test", + }, + }, + }, + }, + }, + }, + }, + } + + bundletest.SetLocation(b, "variables", filepath.Join(dir, "variables/variables.yml")) + bundletest.SetLocation(b, "resources.jobs", filepath.Join(dir, "job/resource.yml")) + + ctx := context.Background() + // Assign the variables to the dynamic configuration. 
+ diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + p := dyn.MustPathFromString("resources.jobs.job.tasks[0]") + return dyn.SetByPath(v, p.Append(dyn.Key("libraries")), dyn.V("${var.cluster_libraries}")) + }) + return diag.FromErr(err) + }) + require.NoError(t, diags.Error()) + + diags = bundle.Apply(ctx, b, + bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferences("variables"), + mutator.TranslatePaths(), + )) + require.NoError(t, diags.Error()) + + assert.Equal( + t, + filepath.Join("variables", "local", "whl.whl"), + b.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl, + ) } diff --git a/bundle/config/mutator/validate_git_details.go b/bundle/config/mutator/validate_git_details.go index 116498bfc..69a4221fd 100644 --- a/bundle/config/mutator/validate_git_details.go +++ b/bundle/config/mutator/validate_git_details.go @@ -2,9 +2,9 @@ package mutator import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) type validateGitDetails struct{} @@ -17,13 +17,13 @@ func (m *validateGitDetails) Name() string { return "ValidateGitDetails" } -func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if b.Config.Bundle.Git.Branch == "" || b.Config.Bundle.Git.ActualBranch == "" { return nil } if b.Config.Bundle.Git.Branch != b.Config.Bundle.Git.ActualBranch && !b.Config.Bundle.Force { - return fmt.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch) + return diag.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch) } return nil } diff --git a/bundle/config/mutator/validate_git_details_test.go b/bundle/config/mutator/validate_git_details_test.go index f207d9cf9..952e0b572 100644 --- a/bundle/config/mutator/validate_git_details_test.go +++ b/bundle/config/mutator/validate_git_details_test.go @@ -22,9 +22,8 @@ func TestValidateGitDetailsMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) - - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) } func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { @@ -40,10 +39,10 @@ func TestValidateGitDetailsNonMatchingBranches(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) + diags := bundle.Apply(context.Background(), b, m) expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override" - assert.EqualError(t, err, expectedError) + assert.EqualError(t, diags.Error(), expectedError) } func TestValidateGitDetailsNotUsingGit(t *testing.T) { @@ -59,7 +58,6 @@ func TestValidateGitDetailsNotUsingGit(t *testing.T) { } m := ValidateGitDetails() - err := bundle.Apply(context.Background(), b, m) - - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) } diff --git a/bundle/config/mutator/verify_cli_version.go b/bundle/config/mutator/verify_cli_version.go new file mode 100644 index 000000000..9c32fcc9d --- /dev/null +++ 
b/bundle/config/mutator/verify_cli_version.go @@ -0,0 +1,82 @@ +package mutator + +import ( + "context" + "fmt" + "regexp" + + semver "github.com/Masterminds/semver/v3" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/diag" +) + +func VerifyCliVersion() bundle.Mutator { + return &verifyCliVersion{} +} + +type verifyCliVersion struct { +} + +func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // No constraints specified, skip the check. + if b.Config.Bundle.DatabricksCliVersion == "" { + return nil + } + + constraint := b.Config.Bundle.DatabricksCliVersion + if err := validateConstraintSyntax(constraint); err != nil { + return diag.FromErr(err) + } + currentVersion := build.GetInfo().Version + c, err := semver.NewConstraint(constraint) + if err != nil { + return diag.FromErr(err) + } + + version, err := semver.NewVersion(currentVersion) + if err != nil { + return diag.Errorf("parsing CLI version %q failed", currentVersion) + } + + if !c.Check(version) { + return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion) + } + + return nil +} + +func (v *verifyCliVersion) Name() string { + return "VerifyCliVersion" +} + +// validateConstraintSyntax validates the syntax of the version constraint. +func validateConstraintSyntax(constraint string) error { + r := generateConstraintSyntaxRegexp() + if !r.MatchString(constraint) { + return fmt.Errorf("invalid version constraint %q specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", constraint) + } + + return nil +} + +// Generate regexp which matches the supported version constraint syntax. +func generateConstraintSyntaxRegexp() *regexp.Regexp { + // We intentionally only support the format supported by requirements.txt: + // 1. 0.0.0 + // 2. >= 0.0.0 + // 3. <= 0.0.0 + // 4. > 0.0.0 + // 5. < 0.0.0 + // 6. != 0.0.0 + // 7. 0.0.* + // 8. 0.* + // 9. >= 0.0.0, <= 1.0.0 + // 10. 0.0.0-0 + // 11. 0.0.0-beta + // 12. >= 0.0.0-0, <= 1.0.0-0 + + matchVersion := `(\d+\.\d+\.\d+(\-\w+)?|\d+\.\d+.\*|\d+\.\*)` + matchOperators := `(>=|<=|>|<|!=)?` + return regexp.MustCompile(fmt.Sprintf(`^%s ?%s(, %s %s)?$`, matchOperators, matchVersion, matchOperators, matchVersion)) +} diff --git a/bundle/config/mutator/verify_cli_version_test.go b/bundle/config/mutator/verify_cli_version_test.go new file mode 100644 index 000000000..24f656745 --- /dev/null +++ b/bundle/config/mutator/verify_cli_version_test.go @@ -0,0 +1,174 @@ +package mutator + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/build" + "github.com/stretchr/testify/require" +) + +type testCase struct { + currentVersion string + constraint string + expectedError string +} + +func TestVerifyCliVersion(t *testing.T) { + testCases := []testCase{ + { + currentVersion: "0.0.1", + }, + { + currentVersion: "0.0.1", + constraint: "0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: 0.100.0, current: 0.0.1", + }, + { + currentVersion: "0.0.1", + constraint: ">= 0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, current: 0.0.1", + }, + { + currentVersion: "0.100.0", + constraint: "0.100.0", + }, + { + currentVersion: "0.100.1", + constraint: "0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. 
Required: 0.100.0, current: 0.100.1", + }, + { + currentVersion: "0.100.1", + constraint: ">= 0.100.0", + }, + { + currentVersion: "0.100.0", + constraint: "<= 1.0.0", + }, + { + currentVersion: "1.0.0", + constraint: "<= 1.0.0", + }, + { + currentVersion: "1.0.0", + constraint: "<= 0.100.0", + expectedError: "Databricks CLI version constraint not satisfied. Required: <= 0.100.0, current: 1.0.0", + }, + { + currentVersion: "0.99.0", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.99.0", + }, + { + currentVersion: "0.100.0", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.100.1", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.100.2", + constraint: ">= 0.100.0, <= 0.100.2", + }, + { + currentVersion: "0.101.0", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.101.0", + }, + { + currentVersion: "0.100.0-beta", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.100.0-beta", + }, + { + currentVersion: "0.100.0-beta", + constraint: ">= 0.100.0-0, <= 0.100.2-0", + }, + { + currentVersion: "0.100.1-beta", + constraint: ">= 0.100.0-0, <= 0.100.2-0", + }, + { + currentVersion: "0.100.3-beta", + constraint: ">= 0.100.0, <= 0.100.2", + expectedError: "Databricks CLI version constraint not satisfied. Required: >= 0.100.0, <= 0.100.2, current: 0.100.3-beta", + }, + { + currentVersion: "0.100.123", + constraint: "0.100.*", + }, + { + currentVersion: "0.100.123", + constraint: "^0.100", + expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)", + }, + } + + t.Cleanup(func() { + // Reset the build version to the default version + // so that it doesn't affect other tests + // It doesn't really matter what we configure this to when testing + // as long as it is a valid semver version. 
+ build.SetBuildVersion(build.DefaultSemver) + }) + + for i, tc := range testCases { + t.Run(fmt.Sprintf("testcase #%d", i), func(t *testing.T) { + build.SetBuildVersion(tc.currentVersion) + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + DatabricksCliVersion: tc.constraint, + }, + }, + } + diags := bundle.Apply(context.Background(), b, VerifyCliVersion()) + if tc.expectedError != "" { + require.NotEmpty(t, diags) + require.Equal(t, tc.expectedError, diags.Error().Error()) + } else { + require.Empty(t, diags) + } + }) + } +} + +func TestValidateConstraint(t *testing.T) { + testCases := []struct { + constraint string + expected bool + }{ + {"0.0.0", true}, + {">= 0.0.0", true}, + {"<= 0.0.0", true}, + {"> 0.0.0", true}, + {"< 0.0.0", true}, + {"!= 0.0.0", true}, + {"0.0.*", true}, + {"0.*", true}, + {">= 0.0.0, <= 1.0.0", true}, + {">= 0.0.0-0, <= 1.0.0-0", true}, + {"0.0.0-0", true}, + {"0.0.0-beta", true}, + {"^0.0.0", false}, + {"~0.0.0", false}, + {"0.0.0 1.0.0", false}, + {"> 0.0.0 < 1.0.0", false}, + } + + for _, tc := range testCases { + t.Run(tc.constraint, func(t *testing.T) { + err := validateConstraintSyntax(tc.constraint) + if tc.expected { + require.NoError(t, err) + } else { + require.Error(t, err) + } + }) + } +} diff --git a/bundle/config/paths/paths.go b/bundle/config/paths/paths.go index 2c9ecb8c0..95977ee37 100644 --- a/bundle/config/paths/paths.go +++ b/bundle/config/paths/paths.go @@ -1,19 +1,22 @@ package paths import ( - "fmt" - "path/filepath" + "github.com/databricks/cli/libs/dyn" ) type Paths struct { // Absolute path on the local file system to the configuration file that holds // the definition of this resource. ConfigFilePath string `json:"-" bundle:"readonly"` + + // DynamicValue stores the [dyn.Value] of the containing struct. + // This assumes that this struct is always embedded. + DynamicValue dyn.Value `json:"-"` } -func (p *Paths) ConfigFileDirectory() (string, error) { - if p.ConfigFilePath == "" { - return "", fmt.Errorf("config file path not configured") +func (p *Paths) ConfigureConfigFilePath() { + if !p.DynamicValue.IsValid() { + panic("DynamicValue not set") } - return filepath.Dir(p.ConfigFilePath), nil + p.ConfigFilePath = p.DynamicValue.Location().File } diff --git a/bundle/config/resources.go b/bundle/config/resources.go index 2b453c666..f70052ec0 100644 --- a/bundle/config/resources.go +++ b/bundle/config/resources.go @@ -1,9 +1,11 @@ package config import ( + "context" "fmt" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go" ) // Resources defines Databricks resources associated with the bundle. 
@@ -15,6 +17,7 @@ type Resources struct { Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"` ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"` RegisteredModels map[string]*resources.RegisteredModel `json:"registered_models,omitempty"` + QualityMonitors map[string]*resources.QualityMonitor `json:"quality_monitors,omitempty"` } type UniqueResourceIdTracker struct { @@ -121,50 +124,123 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker, tracker.Type[k] = "registered_model" tracker.ConfigPath[k] = r.RegisteredModels[k].ConfigFilePath } + for k := range r.QualityMonitors { + if _, ok := tracker.Type[k]; ok { + return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)", + k, + tracker.Type[k], + tracker.ConfigPath[k], + "quality_monitor", + r.QualityMonitors[k].ConfigFilePath, + ) + } + tracker.Type[k] = "quality_monitor" + tracker.ConfigPath[k] = r.QualityMonitors[k].ConfigFilePath + } return tracker, nil } -// SetConfigFilePath sets the specified path for all resources contained in this instance. +type resource struct { + resource ConfigResource + resource_type string + key string +} + +func (r *Resources) allResources() []resource { + all := make([]resource, 0) + for k, e := range r.Jobs { + all = append(all, resource{resource_type: "job", resource: e, key: k}) + } + for k, e := range r.Pipelines { + all = append(all, resource{resource_type: "pipeline", resource: e, key: k}) + } + for k, e := range r.Models { + all = append(all, resource{resource_type: "model", resource: e, key: k}) + } + for k, e := range r.Experiments { + all = append(all, resource{resource_type: "experiment", resource: e, key: k}) + } + for k, e := range r.ModelServingEndpoints { + all = append(all, resource{resource_type: "serving endpoint", resource: e, key: k}) + } + for k, e := range r.RegisteredModels { + all = append(all, resource{resource_type: "registered model", resource: e, key: k}) + } + for k, e := range r.QualityMonitors { + all = append(all, resource{resource_type: "quality monitor", resource: e, key: k}) + } + return all +} + +func (r *Resources) VerifyAllResourcesDefined() error { + all := r.allResources() + for _, e := range all { + err := e.resource.Validate() + if err != nil { + return fmt.Errorf("%s %s is not defined", e.resource_type, e.key) + } + } + + return nil +} + +// ConfigureConfigFilePath sets the specified path for all resources contained in this instance. // This property is used to correctly resolve paths relative to the path // of the configuration file they were defined in. -func (r *Resources) SetConfigFilePath(path string) { +func (r *Resources) ConfigureConfigFilePath() { for _, e := range r.Jobs { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Pipelines { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Models { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.Experiments { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.ModelServingEndpoints { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() } for _, e := range r.RegisteredModels { - e.ConfigFilePath = path + e.ConfigureConfigFilePath() + } + for _, e := range r.QualityMonitors { + e.ConfigureConfigFilePath() } } -// Merge iterates over all resources and merges chunks of the -// resource configuration that can be merged. 
For example, for -// jobs, this merges job cluster definitions and tasks that -// use the same `job_cluster_key`, or `task_key`, respectively. -func (r *Resources) Merge() error { - for _, job := range r.Jobs { - if err := job.MergeJobClusters(); err != nil { - return err - } - if err := job.MergeTasks(); err != nil { - return err - } - } - for _, pipeline := range r.Pipelines { - if err := pipeline.MergeClusters(); err != nil { - return err - } - } - return nil +type ConfigResource interface { + Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) + TerraformResourceName() string + Validate() error +} + +func (r *Resources) FindResourceByConfigKey(key string) (ConfigResource, error) { + found := make([]ConfigResource, 0) + for k := range r.Jobs { + if k == key { + found = append(found, r.Jobs[k]) + } + } + for k := range r.Pipelines { + if k == key { + found = append(found, r.Pipelines[k]) + } + } + + if len(found) == 0 { + return nil, fmt.Errorf("no such resource: %s", key) + } + + if len(found) > 1 { + keys := make([]string, 0, len(found)) + for _, r := range found { + keys = append(keys, fmt.Sprintf("%s:%s", r.TerraformResourceName(), key)) + } + return nil, fmt.Errorf("ambiguous: %s (can resolve to all of %s)", key, keys) + } + + return found[0], nil } diff --git a/bundle/config/resources/job.go b/bundle/config/resources/job.go index bd43ed0af..dde5d5663 100644 --- a/bundle/config/resources/job.go +++ b/bundle/config/resources/job.go @@ -1,10 +1,15 @@ package resources import ( + "context" + "fmt" + "strconv" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/imdario/mergo" ) type Job struct { @@ -25,68 +30,29 @@ func (s Job) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// MergeJobClusters merges job clusters with the same key. -// The job clusters field is a slice, and as such, overrides are appended to it. -// We can identify a job cluster by its key, however, so we can use this key -// to figure out which definitions are actually overrides and merge them. -func (j *Job) MergeJobClusters() error { - keys := make(map[string]*jobs.JobCluster) - output := make([]jobs.JobCluster, 0, len(j.JobClusters)) - - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range j.JobClusters { - key := j.JobClusters[i].JobClusterKey - - // Register job cluster with key if not yet seen before. - ref, ok := keys[key] - if !ok { - output = append(output, j.JobClusters[i]) - keys[key] = &output[len(output)-1] - continue - } - - // Merge this instance into the reference. - err := mergo.Merge(ref, &j.JobClusters[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } +func (j *Job) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + jobId, err := strconv.Atoi(id) + if err != nil { + return false, err } - - // Overwrite resulting slice. - j.JobClusters = output - return nil + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: int64(jobId), + }) + if err != nil { + log.Debugf(ctx, "job %s does not exist", id) + return false, err + } + return true, nil } -// MergeTasks merges tasks with the same key. -// The tasks field is a slice, and as such, overrides are appended to it. 
-// We can identify a task by its task key, however, so we can use this key -// to figure out which definitions are actually overrides and merge them. -func (j *Job) MergeTasks() error { - keys := make(map[string]*jobs.Task) - tasks := make([]jobs.Task, 0, len(j.Tasks)) +func (j *Job) TerraformResourceName() string { + return "databricks_job" +} - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range j.Tasks { - key := j.Tasks[i].TaskKey - - // Register the task with key if not yet seen before. - ref, ok := keys[key] - if !ok { - tasks = append(tasks, j.Tasks[i]) - keys[key] = &tasks[len(tasks)-1] - continue - } - - // Merge this instance into the reference. - err := mergo.Merge(ref, &j.Tasks[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } +func (j *Job) Validate() error { + if j == nil || !j.DynamicValue.IsValid() || j.JobSettings == nil { + return fmt.Errorf("job is not defined") } - // Overwrite resulting slice. - j.Tasks = tasks return nil } diff --git a/bundle/config/resources/job_test.go b/bundle/config/resources/job_test.go deleted file mode 100644 index 24b82fabb..000000000 --- a/bundle/config/resources/job_test.go +++ /dev/null @@ -1,116 +0,0 @@ -package resources - -import ( - "testing" - - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestJobMergeJobClusters(t *testing.T) { - j := &Job{ - JobSettings: &jobs.JobSettings{ - JobClusters: []jobs.JobCluster{ - { - JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "13.3.x-scala2.12", - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - }, - }, - { - JobClusterKey: "bar", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "10.4.x-scala2.12", - }, - }, - { - JobClusterKey: "foo", - NewCluster: &compute.ClusterSpec{ - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - }, - }, - }, - } - - err := j.MergeJobClusters() - require.NoError(t, err) - - assert.Len(t, j.JobClusters, 2) - assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey) - assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey) - - // This job cluster was merged with a subsequent one. - jc0 := j.JobClusters[0].NewCluster - assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion) - assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId) - assert.Equal(t, 4, jc0.NumWorkers) - - // This job cluster was left untouched. - jc1 := j.JobClusters[1].NewCluster - assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion) -} - -func TestJobMergeTasks(t *testing.T) { - j := &Job{ - JobSettings: &jobs.JobSettings{ - Tasks: []jobs.Task{ - { - TaskKey: "foo", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "13.3.x-scala2.12", - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - }, - Libraries: []compute.Library{ - {Whl: "package1"}, - }, - }, - { - TaskKey: "bar", - NewCluster: &compute.ClusterSpec{ - SparkVersion: "10.4.x-scala2.12", - }, - }, - { - TaskKey: "foo", - NewCluster: &compute.ClusterSpec{ - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - Libraries: []compute.Library{ - {Pypi: &compute.PythonPyPiLibrary{ - Package: "package2", - }}, - }, - }, - }, - }, - } - - err := j.MergeTasks() - require.NoError(t, err) - - assert.Len(t, j.Tasks, 2) - assert.Equal(t, "foo", j.Tasks[0].TaskKey) - assert.Equal(t, "bar", j.Tasks[1].TaskKey) - - // This task was merged with a subsequent one. 
- task0 := j.Tasks[0] - cluster := task0.NewCluster - assert.Equal(t, "13.3.x-scala2.12", cluster.SparkVersion) - assert.Equal(t, "i3.2xlarge", cluster.NodeTypeId) - assert.Equal(t, 4, cluster.NumWorkers) - assert.Len(t, task0.Libraries, 2) - assert.Equal(t, task0.Libraries[0].Whl, "package1") - assert.Equal(t, task0.Libraries[1].Pypi.Package, "package2") - - // This task was left untouched. - task1 := j.Tasks[1].NewCluster - assert.Equal(t, "10.4.x-scala2.12", task1.SparkVersion) -} diff --git a/bundle/config/resources/mlflow_experiment.go b/bundle/config/resources/mlflow_experiment.go index 0f53096a0..7854ee7e8 100644 --- a/bundle/config/resources/mlflow_experiment.go +++ b/bundle/config/resources/mlflow_experiment.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -23,3 +28,26 @@ func (s *MlflowExperiment) UnmarshalJSON(b []byte) error { func (s MlflowExperiment) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *MlflowExperiment) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Experiments.GetExperiment(ctx, ml.GetExperimentRequest{ + ExperimentId: id, + }) + if err != nil { + log.Debugf(ctx, "experiment %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *MlflowExperiment) TerraformResourceName() string { + return "databricks_mlflow_experiment" +} + +func (s *MlflowExperiment) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("experiment is not defined") + } + + return nil +} diff --git a/bundle/config/resources/mlflow_model.go b/bundle/config/resources/mlflow_model.go index 59893aa47..40da9f87d 100644 --- a/bundle/config/resources/mlflow_model.go +++ b/bundle/config/resources/mlflow_model.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/ml" ) @@ -23,3 +28,26 @@ func (s *MlflowModel) UnmarshalJSON(b []byte) error { func (s MlflowModel) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *MlflowModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.ModelRegistry.GetModel(ctx, ml.GetModelRequest{ + Name: id, + }) + if err != nil { + log.Debugf(ctx, "model %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *MlflowModel) TerraformResourceName() string { + return "databricks_mlflow_model" +} + +func (s *MlflowModel) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("model is not defined") + } + + return nil +} diff --git a/bundle/config/resources/model_serving_endpoint.go b/bundle/config/resources/model_serving_endpoint.go index d1d57bafc..503cfbbb7 100644 --- a/bundle/config/resources/model_serving_endpoint.go +++ b/bundle/config/resources/model_serving_endpoint.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" 
"github.com/databricks/databricks-sdk-go/service/serving" ) @@ -33,3 +38,26 @@ func (s *ModelServingEndpoint) UnmarshalJSON(b []byte) error { func (s ModelServingEndpoint) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *ModelServingEndpoint) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.ServingEndpoints.Get(ctx, serving.GetServingEndpointRequest{ + Name: id, + }) + if err != nil { + log.Debugf(ctx, "serving endpoint %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *ModelServingEndpoint) TerraformResourceName() string { + return "databricks_model_serving" +} + +func (s *ModelServingEndpoint) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("serving endpoint is not defined") + } + + return nil +} diff --git a/bundle/config/resources/pipeline.go b/bundle/config/resources/pipeline.go index 43450dc49..7e914b909 100644 --- a/bundle/config/resources/pipeline.go +++ b/bundle/config/resources/pipeline.go @@ -1,12 +1,14 @@ package resources import ( - "strings" + "context" + "fmt" "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/imdario/mergo" ) type Pipeline struct { @@ -27,49 +29,25 @@ func (s Pipeline) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// MergeClusters merges cluster definitions with same label. -// The clusters field is a slice, and as such, overrides are appended to it. -// We can identify a cluster by its label, however, so we can use this label -// to figure out which definitions are actually overrides and merge them. -// -// Note: the cluster label is optional and defaults to 'default'. -// We therefore ALSO merge all clusters without a label. -func (p *Pipeline) MergeClusters() error { - clusters := make(map[string]*pipelines.PipelineCluster) - output := make([]pipelines.PipelineCluster, 0, len(p.Clusters)) +func (p *Pipeline) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{ + PipelineId: id, + }) + if err != nil { + log.Debugf(ctx, "pipeline %s does not exist", id) + return false, err + } + return true, nil +} - // Normalize cluster labels. - // If empty, this defaults to "default". - // To make matching case insensitive, labels are lowercased. - for i := range p.Clusters { - label := p.Clusters[i].Label - if label == "" { - label = "default" - } - p.Clusters[i].Label = strings.ToLower(label) +func (p *Pipeline) TerraformResourceName() string { + return "databricks_pipeline" +} + +func (p *Pipeline) Validate() error { + if p == nil || !p.DynamicValue.IsValid() { + return fmt.Errorf("pipeline is not defined") } - // Target overrides are always appended, so we can iterate in natural order to - // first find the base definition, and merge instances we encounter later. - for i := range p.Clusters { - label := p.Clusters[i].Label - - // Register pipeline cluster with label if not yet seen before. - ref, ok := clusters[label] - if !ok { - output = append(output, p.Clusters[i]) - clusters[label] = &output[len(output)-1] - continue - } - - // Merge this instance into the reference. 
- err := mergo.Merge(ref, &p.Clusters[i], mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - } - - // Overwrite resulting slice. - p.Clusters = output return nil } diff --git a/bundle/config/resources/pipeline_test.go b/bundle/config/resources/pipeline_test.go deleted file mode 100644 index 316e3d145..000000000 --- a/bundle/config/resources/pipeline_test.go +++ /dev/null @@ -1,76 +0,0 @@ -package resources - -import ( - "strings" - "testing" - - "github.com/databricks/databricks-sdk-go/service/pipelines" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestPipelineMergeClusters(t *testing.T) { - p := &Pipeline{ - PipelineSpec: &pipelines.PipelineSpec{ - Clusters: []pipelines.PipelineCluster{ - { - NodeTypeId: "i3.xlarge", - NumWorkers: 2, - PolicyId: "1234", - }, - { - Label: "maintenance", - NodeTypeId: "i3.2xlarge", - }, - { - NodeTypeId: "i3.2xlarge", - NumWorkers: 4, - }, - }, - }, - } - - err := p.MergeClusters() - require.NoError(t, err) - - assert.Len(t, p.Clusters, 2) - assert.Equal(t, "default", p.Clusters[0].Label) - assert.Equal(t, "maintenance", p.Clusters[1].Label) - - // The default cluster was merged with a subsequent one. - pc0 := p.Clusters[0] - assert.Equal(t, "i3.2xlarge", pc0.NodeTypeId) - assert.Equal(t, 4, pc0.NumWorkers) - assert.Equal(t, "1234", pc0.PolicyId) - - // The maintenance cluster was left untouched. - pc1 := p.Clusters[1] - assert.Equal(t, "i3.2xlarge", pc1.NodeTypeId) -} - -func TestPipelineMergeClustersCaseInsensitive(t *testing.T) { - p := &Pipeline{ - PipelineSpec: &pipelines.PipelineSpec{ - Clusters: []pipelines.PipelineCluster{ - { - Label: "default", - NumWorkers: 2, - }, - { - Label: "DEFAULT", - NumWorkers: 4, - }, - }, - }, - } - - err := p.MergeClusters() - require.NoError(t, err) - - assert.Len(t, p.Clusters, 1) - - // The default cluster was merged with a subsequent one. - pc0 := p.Clusters[0] - assert.Equal(t, "default", strings.ToLower(pc0.Label)) - assert.Equal(t, 4, pc0.NumWorkers) -} diff --git a/bundle/config/resources/quality_monitor.go b/bundle/config/resources/quality_monitor.go new file mode 100644 index 000000000..0d13e58fa --- /dev/null +++ b/bundle/config/resources/quality_monitor.go @@ -0,0 +1,60 @@ +package resources + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/catalog" +) + +type QualityMonitor struct { + // Represents the Input Arguments for Terraform and will get + // converted to a HCL representation for CRUD + *catalog.CreateMonitor + + // This represents the id which is the full name of the monitor + // (catalog_name.schema_name.table_name) that can be used + // as a reference in other resources. This value is returned by terraform. + ID string `json:"id,omitempty" bundle:"readonly"` + + // Path to config file where the resource is defined. All bundle resources + // include this for interpolation purposes. 
+ paths.Paths + + ModifiedStatus ModifiedStatus `json:"modified_status,omitempty" bundle:"internal"` +} + +func (s *QualityMonitor) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QualityMonitor) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +func (s *QualityMonitor) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.QualityMonitors.Get(ctx, catalog.GetQualityMonitorRequest{ + TableName: id, + }) + if err != nil { + log.Debugf(ctx, "quality monitor %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *QualityMonitor) TerraformResourceName() string { + return "databricks_quality_monitor" +} + +func (s *QualityMonitor) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("quality monitor is not defined") + } + + return nil +} diff --git a/bundle/config/resources/registered_model.go b/bundle/config/resources/registered_model.go index 7b4b70d1a..fba643c69 100644 --- a/bundle/config/resources/registered_model.go +++ b/bundle/config/resources/registered_model.go @@ -1,7 +1,12 @@ package resources import ( + "context" + "fmt" + "github.com/databricks/cli/bundle/config/paths" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/catalog" ) @@ -34,3 +39,26 @@ func (s *RegisteredModel) UnmarshalJSON(b []byte) error { func (s RegisteredModel) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } + +func (s *RegisteredModel) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) { + _, err := w.RegisteredModels.Get(ctx, catalog.GetRegisteredModelRequest{ + FullName: id, + }) + if err != nil { + log.Debugf(ctx, "registered model %s does not exist", id) + return false, err + } + return true, nil +} + +func (s *RegisteredModel) TerraformResourceName() string { + return "databricks_registered_model" +} + +func (s *RegisteredModel) Validate() error { + if s == nil || !s.DynamicValue.IsValid() { + return fmt.Errorf("registered model is not defined") + } + + return nil +} diff --git a/bundle/config/resources_test.go b/bundle/config/resources_test.go index 9c4104e4d..7415029b1 100644 --- a/bundle/config/resources_test.go +++ b/bundle/config/resources_test.go @@ -1,6 +1,8 @@ package config import ( + "encoding/json" + "reflect" "testing" "github.com/databricks/cli/bundle/config/paths" @@ -125,3 +127,57 @@ func TestVerifySafeMergeForRegisteredModels(t *testing.T) { err := r.VerifySafeMerge(&other) assert.ErrorContains(t, err, "multiple resources named bar (registered_model at bar.yml, registered_model at bar2.yml)") } + +// This test ensures that all resources have a custom marshaller and unmarshaller. +// This is required because DABs resources map to Databricks APIs, and they do so +// by embedding the corresponding Go SDK structs. +// +// Go SDK structs often implement custom marshalling and unmarshalling methods (based on the API specifics). +// If the Go SDK struct implements custom marshalling and unmarshalling and we do not +// for the resources at the top level, marshalling and unmarshalling operations will panic. +// Thus we will be overly cautious and ensure that all resources need a custom marshaller and unmarshaller. +// +// Why do we not assert this using an interface to assert MarshalJSON and UnmarshalJSON +// are implemented at the top level? 
+// If a method is implemented for an embedded struct, the top level struct will +// also have that method and satisfy the interface. This is why we cannot assert +// that the methods are implemented at the top level using an interface. +// +// Why don't we use reflection to assert that the methods are implemented at the +// top level? +// Same problem as above, the golang reflection package does not seem to provide +// a way to directly assert that MarshalJSON and UnmarshalJSON are implemented +// at the top level. +func TestCustomMarshallerIsImplemented(t *testing.T) { + r := Resources{} + rt := reflect.TypeOf(r) + + for i := 0; i < rt.NumField(); i++ { + field := rt.Field(i) + + // Fields in Resources are expected to be of the form map[string]*resourceStruct + assert.Equal(t, field.Type.Kind(), reflect.Map, "Resource %s is not a map", field.Name) + kt := field.Type.Key() + assert.Equal(t, kt.Kind(), reflect.String, "Resource %s is not a map with string keys", field.Name) + vt := field.Type.Elem() + assert.Equal(t, vt.Kind(), reflect.Ptr, "Resource %s is not a map with pointer values", field.Name) + + // Marshalling a resourceStruct will panic if resourceStruct does not have a custom marshaller + // This is because resourceStruct embeds a Go SDK struct that implements + // a custom marshaller. + // Eg: resource.Job implements MarshalJSON + v := reflect.Zero(vt.Elem()).Interface() + assert.NotPanics(t, func() { + json.Marshal(v) + }, "Resource %s does not have a custom marshaller", field.Name) + + // Unmarshalling a *resourceStruct will panic if the resource does not have a custom unmarshaller + // This is because resourceStruct embeds a Go SDK struct that implements + // a custom unmarshaller. + // Eg: *resource.Job implements UnmarshalJSON + v = reflect.New(vt.Elem()).Interface() + assert.NotPanics(t, func() { + json.Unmarshal([]byte("{}"), v) + }, "Resource %s does not have a custom unmarshaller", field.Name) + } +} diff --git a/bundle/config/root.go b/bundle/config/root.go index 94cc0b177..594a9105f 100644 --- a/bundle/config/root.go +++ b/bundle/config/root.go @@ -1,22 +1,26 @@ package config import ( + "bytes" + "context" "fmt" "os" - "path/filepath" "strings" "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/dyn/merge" + "github.com/databricks/cli/libs/dyn/yamlloader" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" - "github.com/ghodss/yaml" - "github.com/imdario/mergo" ) type Root struct { - // Path contains the directory path to the root of the bundle. - // It is set when loading `databricks.yml`. - Path string `json:"-" bundle:"readonly"` + value dyn.Value + depth int // Contains user defined variables Variables map[string]*variable.Variable `json:"variables,omitempty"` @@ -64,54 +68,187 @@ type Root struct { } // Load loads the bundle configuration file at the specified path. -func Load(path string) (*Root, error) { +func Load(path string) (*Root, diag.Diagnostics) { raw, err := os.ReadFile(path) if err != nil { - return nil, err + return nil, diag.FromErr(err) } - var r Root - err = yaml.Unmarshal(raw, &r) + return LoadFromBytes(path, raw) +} + +func LoadFromBytes(path string, raw []byte) (*Root, diag.Diagnostics) { + r := Root{} + + // Load configuration tree from YAML.
+ v, err := yamlloader.LoadYAML(path, bytes.NewBuffer(raw)) if err != nil { - return nil, fmt.Errorf("failed to load %s: %w", path, err) + return nil, diag.Errorf("failed to load %s: %v", path, err) } - if r.Environments != nil && r.Targets != nil { - return nil, fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path) + // Rewrite configuration tree where necessary. + v, err = rewriteShorthands(v) + if err != nil { + return nil, diag.Errorf("failed to rewrite %s: %v", path, err) } - if r.Environments != nil { - //TODO: add a command line notice that this is a deprecated option. - r.Targets = r.Environments - } + // Normalize dynamic configuration tree according to configuration type. + v, diags := convert.Normalize(r, v) - r.Path = filepath.Dir(path) - r.SetConfigFilePath(path) + // Convert normalized configuration tree to typed configuration. + err = r.updateWithDynamicValue(v) + if err != nil { + return nil, diag.Errorf("failed to load %s: %v", path, err) + } _, err = r.Resources.VerifyUniqueResourceIdentifiers() - return &r, err + if err != nil { + diags = diags.Extend(diag.FromErr(err)) + } + return &r, diags +} + +func (r *Root) initializeDynamicValue() error { + // Many test cases initialize a config as a Go struct literal. + // The value will be invalid and we need to populate it from the typed configuration. + if r.value.IsValid() { + return nil + } + + nv, err := convert.FromTyped(r, dyn.NilValue) + if err != nil { + return err + } + + r.value = nv + return nil +} + +func (r *Root) updateWithDynamicValue(nv dyn.Value) error { + // Hack: restore state; it may be cleared by [ToTyped] if + // the configuration equals nil (happens in tests). + depth := r.depth + + defer func() { + r.depth = depth + }() + + // Convert normalized configuration tree to typed configuration. + err := convert.ToTyped(r, nv) + if err != nil { + return err + } + + // Assign the normalized configuration tree. + r.value = nv + + // At the moment the check has to be done as part of updateWithDynamicValue + // because otherwise ConfigureConfigFilePath will fail with a panic. + // In the future, we should move this check to a separate mutator in initialise phase. + err = r.Resources.VerifyAllResourcesDefined() + if err != nil { + return err + } + + // Assign config file paths after converting to typed configuration. + r.ConfigureConfigFilePath() + return nil +} + +// Mutate applies a transformation to the dynamic configuration value of a Root object. +// +// Parameters: +// - fn: A function that mutates a dyn.Value object +// +// Example usage, setting bundle.deployment.lock.enabled to false: +// +// err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { +// return dyn.Map(v, "bundle.deployment.lock", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { +// return dyn.Set(v, "enabled", dyn.V(false)) +// }) +// }) +func (r *Root) Mutate(fn func(dyn.Value) (dyn.Value, error)) error { + err := r.initializeDynamicValue() + if err != nil { + return err + } + nv, err := fn(r.value) + if err != nil { + return err + } + err = r.updateWithDynamicValue(nv) + if err != nil { + return err + } + return nil +} + +func (r *Root) MarkMutatorEntry(ctx context.Context) error { + err := r.initializeDynamicValue() + if err != nil { + return err + } + + r.depth++ + + // If we are entering a mutator at depth 1, we need to convert + // the dynamic configuration tree to typed configuration. + if r.depth == 1 { + // Always run ToTyped upon entering a mutator. 
+ // Convert normalized configuration tree to typed configuration. + err := r.updateWithDynamicValue(r.value) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err + } + + } else { + nv, err := convert.FromTyped(r, r.value) + if err != nil { + log.Warnf(ctx, "unable to convert typed configuration to dynamic configuration: %v", err) + return err + } + + // Re-run ToTyped to ensure that no state is piggybacked + err = r.updateWithDynamicValue(nv) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err + } + } + + return nil +} + +func (r *Root) MarkMutatorExit(ctx context.Context) error { + r.depth-- + + // If we are exiting a mutator at depth 0, we need to convert + // the typed configuration to a dynamic configuration tree. + if r.depth == 0 { + nv, err := convert.FromTyped(r, r.value) + if err != nil { + log.Warnf(ctx, "unable to convert typed configuration to dynamic configuration: %v", err) + return err + } + + // Re-run ToTyped to ensure that no state is piggybacked + err = r.updateWithDynamicValue(nv) + if err != nil { + log.Warnf(ctx, "unable to convert dynamic configuration to typed configuration: %v", err) + return err + } + } + + return nil } // SetConfigFilePath configures the path that its configuration // was loaded from in configuration leafs that require it. -func (r *Root) SetConfigFilePath(path string) { - r.Resources.SetConfigFilePath(path) +func (r *Root) ConfigureConfigFilePath() { + r.Resources.ConfigureConfigFilePath() if r.Artifacts != nil { - r.Artifacts.SetConfigFilePath(path) - } - - if r.Targets != nil { - for _, env := range r.Targets { - if env == nil { - continue - } - if env.Resources != nil { - env.Resources.SetConfigFilePath(path) - } - if env.Artifacts != nil { - env.Artifacts.SetConfigFilePath(path) - } - } + r.Artifacts.ConfigureConfigFilePath() } } @@ -130,6 +267,11 @@ func (r *Root) InitializeVariables(vars []string) error { if _, ok := r.Variables[name]; !ok { return fmt.Errorf("variable %s has not been defined", name) } + + if r.Variables[name].IsComplex() { + return fmt.Errorf("setting variables of complex type via --var flag is not supported: %s", name) + } + err := r.Variables[name].Set(val) if err != nil { return fmt.Errorf("failed to assign %s to %s: %s", val, name, err) @@ -139,127 +281,251 @@ func (r *Root) InitializeVariables(vars []string) error { } func (r *Root) Merge(other *Root) error { - err := r.Sync.Merge(r, other) - if err != nil { - return err - } - other.Sync = Sync{} - - // TODO: when hooking into merge semantics, disallow setting path on the target instance. - other.Path = "" - // Check for safe merge, protecting against duplicate resource identifiers - err = r.Resources.VerifySafeMerge(&other.Resources) + err := r.Resources.VerifySafeMerge(&other.Resources) if err != nil { return err } - // TODO: define and test semantics for merging. - return mergo.Merge(r, other, mergo.WithOverride) + // Merge dynamic configuration values. + return r.Mutate(func(root dyn.Value) (dyn.Value, error) { + return merge.Merge(root, other.value) + }) } -func (r *Root) MergeTargetOverrides(target *Target) error { +func mergeField(rv, ov dyn.Value, name string) (dyn.Value, error) { + path := dyn.NewPath(dyn.Key(name)) + reference, _ := dyn.GetByPath(rv, path) + override, _ := dyn.GetByPath(ov, path) + + // Merge the override into the reference. + var out dyn.Value var err error - - // Target may be nil if it's empty. 
- if target == nil { - return nil + if reference.IsValid() && override.IsValid() { + out, err = merge.Merge(reference, override) + if err != nil { + return dyn.InvalidValue, err + } + } else if reference.IsValid() { + out = reference + } else if override.IsValid() { + out = override + } else { + return rv, nil } - if target.Bundle != nil { - err = mergo.Merge(&r.Bundle, target.Bundle, mergo.WithOverride) - if err != nil { + return dyn.SetByPath(rv, path, out) +} + +func (r *Root) MergeTargetOverrides(name string) error { + root := r.value + target, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("targets"), dyn.Key(name))) + if err != nil { + return err + } + + // Confirm validity of variable overrides. + err = validateVariableOverrides(root, target) + if err != nil { + return err + } + + // Merge fields that can be merged 1:1. + for _, f := range []string{ + "bundle", + "workspace", + "artifacts", + "resources", + "sync", + "permissions", + } { + if root, err = mergeField(root, target, f); err != nil { return err } } - if target.Workspace != nil { - err = mergo.Merge(&r.Workspace, target.Workspace, mergo.WithOverride) - if err != nil { - return err - } - } + // Merge `variables`. This field must be overwritten if set, not merged. + if v := target.Get("variables"); v.Kind() != dyn.KindInvalid { + _, err = dyn.Map(v, ".", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + varPath := dyn.MustPathFromString("variables").Append(p...) - if target.Artifacts != nil { - err = mergo.Merge(&r.Artifacts, target.Artifacts, mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - } - - if target.Resources != nil { - err = mergo.Merge(&r.Resources, target.Resources, mergo.WithOverride, mergo.WithAppendSlice) - if err != nil { - return err - } - - err = r.Resources.Merge() - if err != nil { - return err - } - } - - if target.Variables != nil { - for k, v := range target.Variables { - rootVariable, ok := r.Variables[k] - if !ok { - return fmt.Errorf("variable %s is not defined but is assigned a value", k) + vDefault := variable.Get("default") + if vDefault.Kind() != dyn.KindInvalid { + defaultPath := varPath.Append(dyn.Key("default")) + root, err = dyn.SetByPath(root, defaultPath, vDefault) } - if sv, ok := v.(string); ok { - // we allow overrides of the default value for a variable - defaultVal := sv - rootVariable.Default = &defaultVal - } else if vv, ok := v.(map[string]any); ok { - // we also allow overrides of the lookup value for a variable - lookup, ok := vv["lookup"] - if !ok { - return fmt.Errorf("variable %s is incorrectly defined lookup override, no 'lookup' key defined", k) + vLookup := variable.Get("lookup") + if vLookup.Kind() != dyn.KindInvalid { + lookupPath := varPath.Append(dyn.Key("lookup")) + root, err = dyn.SetByPath(root, lookupPath, vLookup) + } + + return root, err + })) + if err != nil { + return err + } + } + + // Merge `run_as`. This field must be overwritten if set, not merged. + if v := target.Get("run_as"); v.Kind() != dyn.KindInvalid { + root, err = dyn.Set(root, "run_as", v) + if err != nil { + return err + } + } + + // Below, we're setting fields on the bundle key, so make sure it exists. + if root.Get("bundle").Kind() == dyn.KindInvalid { + root, err = dyn.Set(root, "bundle", dyn.V(map[string]dyn.Value{})) + if err != nil { + return err + } + } + + // Merge `mode`. This field must be overwritten if set, not merged. 
+ if v := target.Get("mode"); v.Kind() != dyn.KindInvalid { + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("mode")), v) + if err != nil { + return err + } + } + + // Merge `compute_id`. This field must be overwritten if set, not merged. + if v := target.Get("compute_id"); v.Kind() != dyn.KindInvalid { + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("compute_id")), v) + if err != nil { + return err + } + } + + // Merge `git`. + if v := target.Get("git"); v.Kind() != dyn.KindInvalid { + ref, err := dyn.GetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git"))) + if err != nil { + ref = dyn.V(map[string]dyn.Value{}) + } + + // Merge the override into the reference. + out, err := merge.Merge(ref, v) + if err != nil { + return err + } + + // If the branch was overridden, we need to clear the inferred flag. + if branch := v.Get("branch"); branch.Kind() != dyn.KindInvalid { + out, err = dyn.SetByPath(out, dyn.NewPath(dyn.Key("inferred")), dyn.V(false)) + if err != nil { + return err + } + } + + // Set the merged value. + root, err = dyn.SetByPath(root, dyn.NewPath(dyn.Key("bundle"), dyn.Key("git")), out) + if err != nil { + return err + } + } + + // Convert normalized configuration tree to typed configuration. + return r.updateWithDynamicValue(root) +} + +// rewriteShorthands performs lightweight rewriting of the configuration +// tree where we allow users to write a shorthand and must rewrite to the full form. +func rewriteShorthands(v dyn.Value) (dyn.Value, error) { + if v.Kind() != dyn.KindMap { + return v, nil + } + + // For each target, rewrite the variables block. + return dyn.Map(v, "targets", dyn.Foreach(func(_ dyn.Path, target dyn.Value) (dyn.Value, error) { + // Confirm it has a variables block. + if target.Get("variables").Kind() == dyn.KindInvalid { + return target, nil + } + + // For each variable, normalize its contents if it is a single string. + return dyn.Map(target, "variables", dyn.Foreach(func(p dyn.Path, variable dyn.Value) (dyn.Value, error) { + switch variable.Kind() { + + case dyn.KindString, dyn.KindBool, dyn.KindFloat, dyn.KindInt: + // Rewrite the variable to a map with a single key called "default". + // This conforms to the variable type. Normalization back to the typed + // configuration will convert this to a string if necessary. + return dyn.NewValue(map[string]dyn.Value{ + "default": variable, + }, variable.Locations()), nil + + case dyn.KindMap, dyn.KindSequence: + // Check if the original definition of variable has a type field. + typeV, err := dyn.GetByPath(v, p.Append(dyn.Key("type"))) + if err != nil { + return variable, nil } - rootVariable.Lookup = variable.LookupFromMap(lookup.(map[string]any)) - } else { - return fmt.Errorf("variable %s is incorrectly defined in target override", k) + + if typeV.MustString() == "complex" { + return dyn.NewValue(map[string]dyn.Value{ + "type": typeV, + "default": variable, + }, variable.Locations()), nil + } + + return variable, nil + + default: + return variable, nil } + })) + })) +} + +// validateVariableOverrides checks that all variables specified +// in the target override are also defined in the root. +func validateVariableOverrides(root, target dyn.Value) (err error) { + var rv map[string]variable.Variable + var tv map[string]variable.Variable + + // Collect variables from the root. 
+ if v := root.Get("variables"); v.Kind() != dyn.KindInvalid { + err = convert.ToTyped(&rv, v) + if err != nil { + return fmt.Errorf("unable to collect variables from root: %w", err) } } - if target.RunAs != nil { - r.RunAs = target.RunAs - } - - if target.Mode != "" { - r.Bundle.Mode = target.Mode - } - - if target.ComputeID != "" { - r.Bundle.ComputeID = target.ComputeID - } - - git := &r.Bundle.Git - if target.Git.Branch != "" { - git.Branch = target.Git.Branch - git.Inferred = false - } - if target.Git.Commit != "" { - git.Commit = target.Git.Commit - } - if target.Git.OriginURL != "" { - git.OriginURL = target.Git.OriginURL - } - - if target.Sync != nil { - err = mergo.Merge(&r.Sync, target.Sync, mergo.WithAppendSlice) + // Collect variables from the target. + if v := target.Get("variables"); v.Kind() != dyn.KindInvalid { + err = convert.ToTyped(&tv, v) if err != nil { - return err + return fmt.Errorf("unable to collect variables from target: %w", err) } } - if target.Permissions != nil { - err = mergo.Merge(&r.Permissions, target.Permissions, mergo.WithAppendSlice) - if err != nil { - return err + // Check that all variables in the target exist in the root. + for k := range tv { + if _, ok := rv[k]; !ok { + return fmt.Errorf("variable %s is not defined but is assigned a value", k) } } return nil } + +// Best effort to get the location of configuration value at the specified path. +// This function is useful to annotate error messages with the location, because +// we don't want to fail with a different error message if we cannot retrieve the location. +func (r Root) GetLocation(path string) dyn.Location { + v, err := dyn.Get(r.value, path) + if err != nil { + return dyn.Location{} + } + return v.Location() +} + +// Value returns the dynamic configuration value of the root object. This value +// is the source of truth and is kept in sync with values in the typed configuration. 
+func (r Root) Value() dyn.Value { + return r.value +} diff --git a/bundle/config/root_test.go b/bundle/config/root_test.go index 3f37da07a..aed670d6c 100644 --- a/bundle/config/root_test.go +++ b/bundle/config/root_test.go @@ -25,69 +25,24 @@ func TestRootMarshalUnmarshal(t *testing.T) { } func TestRootLoad(t *testing.T) { - root, err := Load("../tests/basic/databricks.yml") - require.NoError(t, err) + root, diags := Load("../tests/basic/databricks.yml") + require.NoError(t, diags.Error()) assert.Equal(t, "basic", root.Bundle.Name) } -func TestRootMergeStruct(t *testing.T) { - root := &Root{ - Path: "path", - Workspace: Workspace{ - Host: "foo", - Profile: "profile", - }, - } - other := &Root{ - Path: "path", - Workspace: Workspace{ - Host: "bar", - }, - } - assert.NoError(t, root.Merge(other)) - assert.Equal(t, "bar", root.Workspace.Host) - assert.Equal(t, "profile", root.Workspace.Profile) -} - -func TestRootMergeMap(t *testing.T) { - root := &Root{ - Path: "path", - Targets: map[string]*Target{ - "development": { - Workspace: &Workspace{ - Host: "foo", - Profile: "profile", - }, - }, - }, - } - other := &Root{ - Path: "path", - Targets: map[string]*Target{ - "development": { - Workspace: &Workspace{ - Host: "bar", - }, - }, - }, - } - assert.NoError(t, root.Merge(other)) - assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Targets["development"].Workspace) -} - func TestDuplicateIdOnLoadReturnsError(t *testing.T) { - _, err := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") - assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") + _, diags := Load("./testdata/duplicate_resource_names_in_root/databricks.yml") + assert.ErrorContains(t, diags.Error(), "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)") } func TestDuplicateIdOnMergeReturnsError(t *testing.T) { - root, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") - require.NoError(t, err) + root, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml") + require.NoError(t, diags.Error()) - other, err := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") - require.NoError(t, err) + other, diags := Load("./testdata/duplicate_resource_name_in_subconfiguration/resources.yml") + require.NoError(t, diags.Error()) - err = root.Merge(other) + err := root.Merge(other) assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)") } @@ -96,7 +51,7 @@ func TestInitializeVariables(t *testing.T) { root := &Root{ Variables: map[string]*variable.Variable{ "foo": { - Default: &fooDefault, + Default: fooDefault, Description: "an optional variable since default is defined", }, "bar": { @@ -107,8 +62,8 @@ func TestInitializeVariables(t *testing.T) { err := root.InitializeVariables([]string{"foo=123", "bar=456"}) assert.NoError(t, err) - assert.Equal(t, "123", *(root.Variables["foo"].Value)) - assert.Equal(t, "456", *(root.Variables["bar"].Value)) + assert.Equal(t, "123", (root.Variables["foo"].Value)) + assert.Equal(t, "456", (root.Variables["bar"].Value)) } func TestInitializeVariablesWithAnEqualSignInValue(t 
*testing.T) { @@ -122,7 +77,7 @@ func TestInitializeVariablesWithAnEqualSignInValue(t *testing.T) { err := root.InitializeVariables([]string{"foo=123=567"}) assert.NoError(t, err) - assert.Equal(t, "123=567", *(root.Variables["foo"].Value)) + assert.Equal(t, "123=567", (root.Variables["foo"].Value)) } func TestInitializeVariablesInvalidFormat(t *testing.T) { @@ -154,8 +109,79 @@ func TestInitializeVariablesUndefinedVariables(t *testing.T) { func TestRootMergeTargetOverridesWithMode(t *testing.T) { root := &Root{ Bundle: Bundle{}, + Targets: map[string]*Target{ + "development": { + Mode: Development, + }, + }, } - env := &Target{Mode: Development} - require.NoError(t, root.MergeTargetOverrides(env)) + root.initializeDynamicValue() + require.NoError(t, root.MergeTargetOverrides("development")) assert.Equal(t, Development, root.Bundle.Mode) } + +func TestInitializeComplexVariablesViaFlagIsNotAllowed(t *testing.T) { + root := &Root{ + Variables: map[string]*variable.Variable{ + "foo": { + Type: variable.VariableTypeComplex, + }, + }, + } + + err := root.InitializeVariables([]string{"foo=123"}) + assert.ErrorContains(t, err, "setting variables of complex type via --var flag is not supported: foo") +} + +func TestRootMergeTargetOverridesWithVariables(t *testing.T) { + root := &Root{ + Bundle: Bundle{}, + Variables: map[string]*variable.Variable{ + "foo": { + Default: "foo", + Description: "foo var", + }, + "foo2": { + Default: "foo2", + Description: "foo2 var", + }, + "complex": { + Type: variable.VariableTypeComplex, + Description: "complex var", + Default: map[string]interface{}{ + "key": "value", + }, + }, + }, + Targets: map[string]*Target{ + "development": { + Variables: map[string]*variable.Variable{ + "foo": { + Default: "bar", + Description: "wrong", + }, + "complex": { + Type: "wrong", + Description: "wrong", + Default: map[string]interface{}{ + "key1": "value1", + }, + }, + }, + }, + }, + } + root.initializeDynamicValue() + require.NoError(t, root.MergeTargetOverrides("development")) + assert.Equal(t, "bar", root.Variables["foo"].Default) + assert.Equal(t, "foo var", root.Variables["foo"].Description) + + assert.Equal(t, "foo2", root.Variables["foo2"].Default) + assert.Equal(t, "foo2 var", root.Variables["foo2"].Description) + + assert.Equal(t, map[string]interface{}{ + "key1": "value1", + }, root.Variables["complex"].Default) + assert.Equal(t, "complex var", root.Variables["complex"].Description) + +} diff --git a/bundle/config/sync.go b/bundle/config/sync.go index 6ba2603c4..0580e4c4f 100644 --- a/bundle/config/sync.go +++ b/bundle/config/sync.go @@ -1,7 +1,5 @@ package config -import "path/filepath" - type Sync struct { // Include contains a list of globs evaluated relative to the bundle root path // to explicitly include files that were excluded by the user's gitignore. @@ -13,19 +11,3 @@ type Sync struct { // 2) the `Include` field above. 
Exclude []string `json:"exclude,omitempty"` } - -func (s *Sync) Merge(root *Root, other *Root) error { - path, err := filepath.Rel(root.Path, other.Path) - if err != nil { - return err - } - for _, include := range other.Sync.Include { - s.Include = append(s.Include, filepath.Join(path, include)) - } - - for _, exclude := range other.Sync.Exclude { - s.Exclude = append(s.Exclude, filepath.Join(path, exclude)) - } - - return nil -} diff --git a/bundle/config/target.go b/bundle/config/target.go index 158f25606..acc493574 100644 --- a/bundle/config/target.go +++ b/bundle/config/target.go @@ -2,6 +2,7 @@ package config import ( "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/config/variable" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -33,7 +34,7 @@ type Target struct { // Override default values or lookup name for defined variables // Does not permit defining new variables or redefining existing ones // in the scope of an target - Variables map[string]any `json:"variables,omitempty"` + Variables map[string]*variable.Variable `json:"variables,omitempty"` Git Git `json:"git,omitempty"` diff --git a/bundle/config/validate/files_to_sync.go b/bundle/config/validate/files_to_sync.go new file mode 100644 index 000000000..d53e38243 --- /dev/null +++ b/bundle/config/validate/files_to_sync.go @@ -0,0 +1,54 @@ +package validate + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/libs/diag" +) + +func FilesToSync() bundle.ReadOnlyMutator { + return &filesToSync{} +} + +type filesToSync struct { +} + +func (v *filesToSync) Name() string { + return "validate:files_to_sync" +} + +func (v *filesToSync) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + sync, err := files.GetSync(ctx, rb) + if err != nil { + return diag.FromErr(err) + } + + fl, err := sync.GetFileList(ctx) + if err != nil { + return diag.FromErr(err) + } + + if len(fl) != 0 { + return nil + } + + diags := diag.Diagnostics{} + if len(rb.Config().Sync.Exclude) == 0 { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "There are no files to sync, please check your .gitignore", + }) + } else { + loc := location{path: "sync.exclude", rb: rb} + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: "There are no files to sync, please check your .gitignore and sync.exclude configuration", + Location: loc.Location(), + Path: loc.Path(), + }) + } + + return diags +} diff --git a/bundle/config/validate/job_cluster_key_defined.go b/bundle/config/validate/job_cluster_key_defined.go new file mode 100644 index 000000000..37ed3f417 --- /dev/null +++ b/bundle/config/validate/job_cluster_key_defined.go @@ -0,0 +1,53 @@ +package validate + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" +) + +func JobClusterKeyDefined() bundle.ReadOnlyMutator { + return &jobClusterKeyDefined{} +} + +type jobClusterKeyDefined struct { +} + +func (v *jobClusterKeyDefined) Name() string { + return "validate:job_cluster_key_defined" +} + +func (v *jobClusterKeyDefined) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + diags := diag.Diagnostics{} + + for k, job := range rb.Config().Resources.Jobs { + jobClusterKeys := make(map[string]bool) + for _, cluster := range job.JobClusters { + if cluster.JobClusterKey != "" { + jobClusterKeys[cluster.JobClusterKey] = true + } + } + + for index, 
task := range job.Tasks { + if task.JobClusterKey != "" { + if _, ok := jobClusterKeys[task.JobClusterKey]; !ok { + loc := location{ + path: fmt.Sprintf("resources.jobs.%s.tasks[%d].job_cluster_key", k, index), + rb: rb, + } + + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("job_cluster_key %s is not defined", task.JobClusterKey), + Location: loc.Location(), + Path: loc.Path(), + }) + } + } + } + } + + return diags +} diff --git a/bundle/config/validate/job_cluster_key_defined_test.go b/bundle/config/validate/job_cluster_key_defined_test.go new file mode 100644 index 000000000..176b0fedc --- /dev/null +++ b/bundle/config/validate/job_cluster_key_defined_test.go @@ -0,0 +1,97 @@ +package validate + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestJobClusterKeyDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + JobClusters: []jobs.JobCluster{ + {JobClusterKey: "do-not-exist"}, + }, + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 0) + require.NoError(t, diags.Error()) +} + +func TestJobClusterKeyNotDefined(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") +} + +func TestJobClusterKeyDefinedInDifferentJob(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + Tasks: []jobs.Task{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job2", + JobClusters: []jobs.JobCluster{ + {JobClusterKey: "do-not-exist"}, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key do-not-exist is not defined") +} diff --git a/bundle/config/validate/validate.go b/bundle/config/validate/validate.go new file mode 100644 index 000000000..af7e984a1 --- /dev/null +++ b/bundle/config/validate/validate.go @@ -0,0 +1,43 @@ +package validate + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" +) + +type validate struct { +} + +type location struct { + path string + rb bundle.ReadOnlyBundle +} + +func (l location) Location() dyn.Location { + return 
l.rb.Config().GetLocation(l.path) +} + +func (l location) Path() dyn.Path { + return dyn.MustPathFromString(l.path) +} + +// Apply implements bundle.Mutator. +func (v *validate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + return bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel( + JobClusterKeyDefined(), + FilesToSync(), + ValidateSyncPatterns(), + )) +} + +// Name implements bundle.Mutator. +func (v *validate) Name() string { + return "validate" +} + +func Validate() bundle.Mutator { + return &validate{} +} diff --git a/bundle/config/validate/validate_sync_patterns.go b/bundle/config/validate/validate_sync_patterns.go new file mode 100644 index 000000000..a04c10776 --- /dev/null +++ b/bundle/config/validate/validate_sync_patterns.go @@ -0,0 +1,79 @@ +package validate + +import ( + "context" + "fmt" + "sync" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/fileset" + "golang.org/x/sync/errgroup" +) + +func ValidateSyncPatterns() bundle.ReadOnlyMutator { + return &validateSyncPatterns{} +} + +type validateSyncPatterns struct { +} + +func (v *validateSyncPatterns) Name() string { + return "validate:validate_sync_patterns" +} + +func (v *validateSyncPatterns) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics { + s := rb.Config().Sync + if len(s.Exclude) == 0 && len(s.Include) == 0 { + return nil + } + + diags, err := checkPatterns(s.Exclude, "sync.exclude", rb) + if err != nil { + return diag.FromErr(err) + } + + includeDiags, err := checkPatterns(s.Include, "sync.include", rb) + if err != nil { + return diag.FromErr(err) + } + + return diags.Extend(includeDiags) +} + +func checkPatterns(patterns []string, path string, rb bundle.ReadOnlyBundle) (diag.Diagnostics, error) { + var mu sync.Mutex + var errs errgroup.Group + var diags diag.Diagnostics + + for i, pattern := range patterns { + index := i + p := pattern + errs.Go(func() error { + fs, err := fileset.NewGlobSet(rb.BundleRoot(), []string{p}) + if err != nil { + return err + } + + all, err := fs.All() + if err != nil { + return err + } + + if len(all) == 0 { + loc := location{path: fmt.Sprintf("%s[%d]", path, index), rb: rb} + mu.Lock() + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("Pattern %s does not match any files", p), + Location: loc.Location(), + Path: loc.Path(), + }) + mu.Unlock() + } + return nil + }) + } + + return diags, errs.Wait() +} diff --git a/bundle/config/variable/lookup.go b/bundle/config/variable/lookup.go index 3b29783eb..56d2ca810 100755 --- a/bundle/config/variable/lookup.go +++ b/bundle/config/variable/lookup.go @@ -297,7 +297,7 @@ func allResolvers() *resolvers { return "", err } - return fmt.Sprint(entity.Id), nil + return fmt.Sprint(entity.ApplicationId), nil } r.Warehouse = func(ctx context.Context, w *databricks.WorkspaceClient, name string) (string, error) { entity, err := w.Warehouses.GetByName(ctx, name) diff --git a/bundle/config/variable/variable.go b/bundle/config/variable/variable.go index 9057f1cb9..ba94f9c8a 100644 --- a/bundle/config/variable/variable.go +++ b/bundle/config/variable/variable.go @@ -2,14 +2,27 @@ package variable import ( "fmt" + "reflect" ) -const VariableReferencePrefix = "var" +// We are using `any` because since introduction of complex variables, +// variables can be of any type. +// Type alias is used to make it easier to understand the code. 
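+// Because this is an alias rather than a distinct defined type, decoded YAML values
+// and plain Go literals assign to it directly, for example:
+//
+//	var s VariableValue = "hello"                        // simple value
+//	var m VariableValue = map[string]any{"key": "value"} // complex value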
+type VariableValue = any + +type VariableType string + +const ( + VariableTypeComplex VariableType = "complex" +) // An input variable for the bundle config type Variable struct { + // A type of the variable. This is used to validate the value of the variable + Type VariableType `json:"type,omitempty"` + // A default value which then makes the variable optional - Default *string `json:"default,omitempty"` + Default VariableValue `json:"default,omitempty"` // Documentation for this input variable Description string `json:"description,omitempty"` @@ -23,7 +36,7 @@ type Variable struct { // 4. Default value defined in variable definition // 5. Throw error, since if no default value is defined, then the variable // is required - Value *string `json:"value,omitempty" bundle:"readonly"` + Value VariableValue `json:"value,omitempty" bundle:"readonly"` // The value of this field will be used to lookup the resource by name // And assign the value of the variable to ID of the resource found. @@ -41,10 +54,24 @@ func (v *Variable) HasValue() bool { return v.Value != nil } -func (v *Variable) Set(val string) error { +func (v *Variable) Set(val VariableValue) error { if v.HasValue() { - return fmt.Errorf("variable has already been assigned value: %s", *v.Value) + return fmt.Errorf("variable has already been assigned value: %s", v.Value) } - v.Value = &val + + rv := reflect.ValueOf(val) + switch rv.Kind() { + case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map: + if v.Type != VariableTypeComplex { + return fmt.Errorf("variable type is not complex") + } + } + + v.Value = val + return nil } + +func (v *Variable) IsComplex() bool { + return v.Type == VariableTypeComplex +} diff --git a/bundle/config/workspace.go b/bundle/config/workspace.go index 5f8691bab..efc5caa66 100644 --- a/bundle/config/workspace.go +++ b/bundle/config/workspace.go @@ -78,8 +78,8 @@ func (s User) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { - cfg := config.Config{ +func (w *Workspace) Config() *config.Config { + cfg := &config.Config{ // Generic Host: w.Host, Profile: w.Profile, @@ -101,6 +101,19 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { AzureLoginAppID: w.AzureLoginAppID, } + for k := range config.ConfigAttributes { + attr := &config.ConfigAttributes[k] + if !attr.IsZero(cfg) { + cfg.SetAttrSource(attr, config.Source{Type: config.SourceType("bundle")}) + } + } + + return cfg +} + +func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { + cfg := w.Config() + // If only the host is configured, we try and unambiguously match it to // a profile in the user's databrickscfg file. Override the default loaders. if w.Host != "" && w.Profile == "" { @@ -124,13 +137,13 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) { // Now that the configuration is resolved, we can verify that the host in the bundle configuration // is identical to the host associated with the selected profile. 
if w.Host != "" && w.Profile != "" { - err := databrickscfg.ValidateConfigAndProfileHost(&cfg, w.Profile) + err := databrickscfg.ValidateConfigAndProfileHost(cfg, w.Profile) if err != nil { return nil, err } } - return databricks.NewWorkspaceClient((*databricks.Config)(&cfg)) + return databricks.NewWorkspaceClient((*databricks.Config)(cfg)) } func init() { diff --git a/bundle/deferred.go b/bundle/deferred.go index 5f3351fcf..56c2bdca2 100644 --- a/bundle/deferred.go +++ b/bundle/deferred.go @@ -3,7 +3,7 @@ package bundle import ( "context" - "github.com/databricks/cli/libs/errs" + "github.com/databricks/cli/libs/diag" ) type DeferredMutator struct { @@ -22,12 +22,9 @@ func Defer(mutator Mutator, finally Mutator) Mutator { } } -func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) error { - mainErr := Apply(ctx, b, d.mutator) - errOnFinish := Apply(ctx, b, d.finally) - if mainErr != nil || errOnFinish != nil { - return errs.FromMany(mainErr, errOnFinish) - } - - return nil +func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + var diags diag.Diagnostics + diags = diags.Extend(Apply(ctx, b, d.mutator)) + diags = diags.Extend(Apply(ctx, b, d.finally)) + return diags } diff --git a/bundle/deferred_test.go b/bundle/deferred_test.go index f75867d69..3abc4aa10 100644 --- a/bundle/deferred_test.go +++ b/bundle/deferred_test.go @@ -2,9 +2,9 @@ package bundle import ( "context" - "fmt" "testing" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/assert" ) @@ -17,9 +17,9 @@ func (t *mutatorWithError) Name() string { return "mutatorWithError" } -func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) error { +func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ - return fmt.Errorf(t.errorMsg) + return diag.Errorf(t.errorMsg) } func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { @@ -30,8 +30,8 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, m3), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, deferredMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -47,8 +47,8 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) { deferredMutator := Defer(Seq(mErr, m1, m2), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, mErr.applyCalled) assert.Equal(t, 0, m1.applyCalled) @@ -64,8 +64,8 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) { deferredMutator := Defer(Seq(m1, mErr, m2), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, mErr.applyCalled) @@ -81,8 +81,8 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, mErr), cleanup) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred") + diags := Apply(context.Background(), b, deferredMutator) + 
assert.ErrorContains(t, diags.Error(), "mutator error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -98,8 +98,14 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) { deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr) b := &Bundle{} - err := Apply(context.Background(), b, deferredMutator) - assert.ErrorContains(t, err, "mutator error occurred\ncleanup error occurred") + diags := Apply(context.Background(), b, deferredMutator) + + var errs []string + for _, d := range diags { + errs = append(errs, d.Summary) + } + assert.Contains(t, errs, "mutator error occurred") + assert.Contains(t, errs, "cleanup error occurred") assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) diff --git a/bundle/deploy/filer.go b/bundle/deploy/filer.go new file mode 100644 index 000000000..c0fd839ef --- /dev/null +++ b/bundle/deploy/filer.go @@ -0,0 +1,14 @@ +package deploy + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/filer" +) + +// FilerFactory is a function that returns a filer.Filer. +type FilerFactory func(b *bundle.Bundle) (filer.Filer, error) + +// StateFiler returns a filer.Filer that can be used to read/write state files. +func StateFiler(b *bundle.Bundle) (filer.Filer, error) { + return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) +} diff --git a/bundle/deploy/files/delete.go b/bundle/deploy/files/delete.go index 9f7ad4d41..133971449 100644 --- a/bundle/deploy/files/delete.go +++ b/bundle/deploy/files/delete.go @@ -2,10 +2,15 @@ package files import ( "context" + "errors" "fmt" + "io/fs" + "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/sync" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/fatih/color" ) @@ -16,7 +21,7 @@ func (m *delete) Name() string { return "files.Delete" } -func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Do not delete files if terraform destroy was not consented if !b.Plan.IsEmpty && !b.Plan.ConfirmApply { return nil @@ -29,7 +34,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { if !b.AutoApprove { proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!"))) if err != nil { - return err + return diag.FromErr(err) } if !proceed { return nil @@ -41,24 +46,35 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error { Recursive: true, }) if err != nil { - return err + return diag.FromErr(err) } // Clean up sync snapshot file - sync, err := getSync(ctx, b) + err = deleteSnapshotFile(ctx, b) if err != nil { - return err - } - err = sync.DestroySnapshot(ctx) - if err != nil { - return err + return diag.FromErr(err) } - cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath())) cmdio.LogString(ctx, "Successfully deleted files!") return nil } +func deleteSnapshotFile(ctx context.Context, b *bundle.Bundle) error { + opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b)) + if err != nil { + return fmt.Errorf("cannot get sync options: %w", err) + } + sp, err := sync.SnapshotPath(opts) + if err != nil { + return err + } + err = os.Remove(sp) + if err != nil && !errors.Is(err, fs.ErrNotExist) { + return fmt.Errorf("failed to destroy sync snapshot file: %s", err) + } 
+ return nil +} + func Delete() bundle.Mutator { return &delete{} } diff --git a/bundle/deploy/files/sync.go b/bundle/deploy/files/sync.go index 148a63ff6..a308668d3 100644 --- a/bundle/deploy/files/sync.go +++ b/bundle/deploy/files/sync.go @@ -8,28 +8,41 @@ import ( "github.com/databricks/cli/libs/sync" ) -func getSync(ctx context.Context, b *bundle.Bundle) (*sync.Sync, error) { - cacheDir, err := b.CacheDir(ctx) +func GetSync(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.Sync, error) { + opts, err := GetSyncOptions(ctx, rb) + if err != nil { + return nil, fmt.Errorf("cannot get sync options: %w", err) + } + return sync.New(ctx, *opts) +} + +func GetSyncOptions(ctx context.Context, rb bundle.ReadOnlyBundle) (*sync.SyncOptions, error) { + cacheDir, err := rb.CacheDir(ctx) if err != nil { return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) } - includes, err := b.GetSyncIncludePatterns(ctx) + includes, err := rb.GetSyncIncludePatterns(ctx) if err != nil { return nil, fmt.Errorf("cannot get list of sync includes: %w", err) } - opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilePath, + opts := &sync.SyncOptions{ + LocalPath: rb.BundleRoot(), + RemotePath: rb.Config().Workspace.FilePath, Include: includes, - Exclude: b.Config.Sync.Exclude, + Exclude: rb.Config().Sync.Exclude, + Host: rb.WorkspaceClient().Config.Host, - Full: false, - CurrentUser: b.Config.Workspace.CurrentUser.User, + Full: false, SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), + WorkspaceClient: rb.WorkspaceClient(), } - return sync.New(ctx, opts) + + if rb.Config().Workspace.CurrentUser != nil { + opts.CurrentUser = rb.Config().Workspace.CurrentUser.User + } + + return opts, nil } diff --git a/bundle/deploy/files/upload.go b/bundle/deploy/files/upload.go index 26d1ef4b5..2c126623e 100644 --- a/bundle/deploy/files/upload.go +++ b/bundle/deploy/files/upload.go @@ -6,6 +6,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -15,16 +16,16 @@ func (m *upload) Name() string { return "files.Upload" } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath)) - sync, err := getSync(ctx, b) + sync, err := GetSync(ctx, bundle.ReadOnly(b)) if err != nil { - return err + return diag.FromErr(err) } - err = sync.RunOnce(ctx) + b.Files, err = sync.RunOnce(ctx) if err != nil { - return err + return diag.FromErr(err) } log.Infof(ctx, "Uploaded bundle files") diff --git a/bundle/deploy/lock/acquire.go b/bundle/deploy/lock/acquire.go index 1335f7800..7d3d0eca8 100644 --- a/bundle/deploy/lock/acquire.go +++ b/bundle/deploy/lock/acquire.go @@ -3,9 +3,9 @@ package lock import ( "context" "errors" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" @@ -33,19 +33,19 @@ func (m *acquire) init(b *bundle.Bundle) error { return nil } -func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Return early if locking is disabled. 
- if !b.Config.Bundle.Lock.IsEnabled() { + if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") return nil } err := m.init(b) if err != nil { - return err + return diag.FromErr(err) } - force := b.Config.Bundle.Lock.Force + force := b.Config.Bundle.Deployment.Lock.Force log.Infof(ctx, "Acquiring deployment lock (force: %v)", force) err = b.Locker.Lock(ctx, force) if err != nil { @@ -55,9 +55,9 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error { if errors.As(err, ¬ExistsError) { // If we get a "doesn't exist" error from the API this indicates // we either don't have permissions or the path is invalid. - return fmt.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath) + return diag.Errorf("cannot write to deployment root (this can indicate a previous deploy was done with a different identity): %s", b.Config.Workspace.RootPath) } - return err + return diag.FromErr(err) } return nil diff --git a/bundle/deploy/lock/release.go b/bundle/deploy/lock/release.go index 52d271943..26f95edfc 100644 --- a/bundle/deploy/lock/release.go +++ b/bundle/deploy/lock/release.go @@ -2,9 +2,9 @@ package lock import ( "context" - "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" ) @@ -12,6 +12,8 @@ import ( type Goal string const ( + GoalBind = Goal("bind") + GoalUnbind = Goal("unbind") GoalDeploy = Goal("deploy") GoalDestroy = Goal("destroy") ) @@ -28,9 +30,9 @@ func (m *release) Name() string { return "lock:release" } -func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *release) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // Return early if locking is disabled. 
- if !b.Config.Bundle.Lock.IsEnabled() { + if !b.Config.Bundle.Deployment.Lock.IsEnabled() { log.Infof(ctx, "Skipping; locking is disabled") return nil } @@ -45,10 +47,12 @@ func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Releasing deployment lock") switch m.goal { case GoalDeploy: - return b.Locker.Unlock(ctx) + return diag.FromErr(b.Locker.Unlock(ctx)) + case GoalBind, GoalUnbind: + return diag.FromErr(b.Locker.Unlock(ctx)) case GoalDestroy: - return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist) + return diag.FromErr(b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)) default: - return fmt.Errorf("unknown goal for lock release: %s", m.goal) + return diag.Errorf("unknown goal for lock release: %s", m.goal) } } diff --git a/bundle/deploy/metadata/annotate_jobs.go b/bundle/deploy/metadata/annotate_jobs.go index 5b9ae5b88..f42d46931 100644 --- a/bundle/deploy/metadata/annotate_jobs.go +++ b/bundle/deploy/metadata/annotate_jobs.go @@ -2,9 +2,9 @@ package metadata import ( "context" - "path" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -18,7 +18,7 @@ func (m *annotateJobs) Name() string { return "metadata.AnnotateJobs" } -func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { for _, job := range b.Config.Resources.Jobs { if job.JobSettings == nil { continue @@ -26,9 +26,9 @@ func (m *annotateJobs) Apply(_ context.Context, b *bundle.Bundle) error { job.JobSettings.Deployment = &jobs.JobDeployment{ Kind: jobs.JobDeploymentKindBundle, - MetadataFilePath: path.Join(b.Config.Workspace.StatePath, MetadataFileName), + MetadataFilePath: metadataFilePath(b), } - job.JobSettings.EditMode = jobs.JobSettingsEditModeUiLocked + job.JobSettings.EditMode = jobs.JobEditModeUiLocked job.JobSettings.Format = jobs.FormatMultiTask } diff --git a/bundle/deploy/metadata/annotate_jobs_test.go b/bundle/deploy/metadata/annotate_jobs_test.go index c7a02e754..8dace4590 100644 --- a/bundle/deploy/metadata/annotate_jobs_test.go +++ b/bundle/deploy/metadata/annotate_jobs_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestAnnotateJobsMutator(t *testing.T) { @@ -34,8 +35,8 @@ func TestAnnotateJobsMutator(t *testing.T) { }, } - err := AnnotateJobs().Apply(context.Background(), b) - assert.NoError(t, err) + diags := AnnotateJobs().Apply(context.Background(), b) + require.NoError(t, diags.Error()) assert.Equal(t, &jobs.JobDeployment{ @@ -43,7 +44,7 @@ func TestAnnotateJobsMutator(t *testing.T) { MetadataFilePath: "/a/b/c/metadata.json", }, b.Config.Resources.Jobs["my-job-1"].JobSettings.Deployment) - assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode) + assert.Equal(t, jobs.JobEditModeUiLocked, b.Config.Resources.Jobs["my-job-1"].EditMode) assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-1"].Format) assert.Equal(t, @@ -52,7 +53,7 @@ func TestAnnotateJobsMutator(t *testing.T) { MetadataFilePath: "/a/b/c/metadata.json", }, b.Config.Resources.Jobs["my-job-2"].JobSettings.Deployment) - assert.Equal(t, jobs.JobSettingsEditModeUiLocked, b.Config.Resources.Jobs["my-job-2"].EditMode) + assert.Equal(t, jobs.JobEditModeUiLocked, 
b.Config.Resources.Jobs["my-job-2"].EditMode) assert.Equal(t, jobs.FormatMultiTask, b.Config.Resources.Jobs["my-job-2"].Format) } @@ -67,6 +68,6 @@ func TestAnnotateJobsMutatorJobWithoutSettings(t *testing.T) { }, } - err := AnnotateJobs().Apply(context.Background(), b) - assert.NoError(t, err) + diags := AnnotateJobs().Apply(context.Background(), b) + require.NoError(t, diags.Error()) } diff --git a/bundle/deploy/metadata/annotate_pipelines.go b/bundle/deploy/metadata/annotate_pipelines.go new file mode 100644 index 000000000..990f48907 --- /dev/null +++ b/bundle/deploy/metadata/annotate_pipelines.go @@ -0,0 +1,34 @@ +package metadata + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/pipelines" +) + +type annotatePipelines struct{} + +func AnnotatePipelines() bundle.Mutator { + return &annotatePipelines{} +} + +func (m *annotatePipelines) Name() string { + return "metadata.AnnotatePipelines" +} + +func (m *annotatePipelines) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { + for _, pipeline := range b.Config.Resources.Pipelines { + if pipeline.PipelineSpec == nil { + continue + } + + pipeline.PipelineSpec.Deployment = &pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: metadataFilePath(b), + } + } + + return nil +} diff --git a/bundle/deploy/metadata/annotate_pipelines_test.go b/bundle/deploy/metadata/annotate_pipelines_test.go new file mode 100644 index 000000000..448a022d0 --- /dev/null +++ b/bundle/deploy/metadata/annotate_pipelines_test.go @@ -0,0 +1,72 @@ +package metadata + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAnnotatePipelinesMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + StatePath: "/a/b/c", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "my-pipeline-1": { + PipelineSpec: &pipelines.PipelineSpec{ + Name: "My Pipeline One", + }, + }, + "my-pipeline-2": { + PipelineSpec: &pipelines.PipelineSpec{ + Name: "My Pipeline Two", + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, AnnotatePipelines()) + require.NoError(t, diags.Error()) + + assert.Equal(t, + &pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Pipelines["my-pipeline-1"].PipelineSpec.Deployment) + + assert.Equal(t, + &pipelines.PipelineDeployment{ + Kind: pipelines.DeploymentKindBundle, + MetadataFilePath: "/a/b/c/metadata.json", + }, + b.Config.Resources.Pipelines["my-pipeline-2"].PipelineSpec.Deployment) +} + +func TestAnnotatePipelinesMutatorPipelineWithoutASpec(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + StatePath: "/a/b/c", + }, + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "my-pipeline-1": {}, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, AnnotatePipelines()) + require.NoError(t, diags.Error()) +} diff --git a/bundle/deploy/metadata/compute.go b/bundle/deploy/metadata/compute.go index 460a81c93..034765484 100644 --- a/bundle/deploy/metadata/compute.go +++ 
b/bundle/deploy/metadata/compute.go @@ -2,11 +2,12 @@ package metadata import ( "context" - "fmt" "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" + "github.com/databricks/cli/libs/diag" ) type compute struct{} @@ -19,23 +20,28 @@ func (m *compute) Name() string { return "metadata.Compute" } -func (m *compute) Apply(_ context.Context, b *bundle.Bundle) error { +func (m *compute) Apply(_ context.Context, b *bundle.Bundle) diag.Diagnostics { b.Metadata = metadata.Metadata{ Version: metadata.Version, Config: metadata.Config{}, } - // Set git details in metadata - b.Metadata.Config.Bundle.Git = b.Config.Bundle.Git + // Set Git details in metadata + b.Metadata.Config.Bundle.Git = config.Git{ + Branch: b.Config.Bundle.Git.Branch, + OriginURL: b.Config.Bundle.Git.OriginURL, + Commit: b.Config.Bundle.Git.Commit, + BundleRootPath: b.Config.Bundle.Git.BundleRootPath, + } // Set job config paths in metadata jobsMetadata := make(map[string]*metadata.Job) for name, job := range b.Config.Resources.Jobs { // Compute config file path the job is defined in, relative to the bundle // root - relativePath, err := filepath.Rel(b.Config.Path, job.ConfigFilePath) + relativePath, err := filepath.Rel(b.RootPath, job.ConfigFilePath) if err != nil { - return fmt.Errorf("failed to compute relative path for job %s: %w", name, err) + return diag.Errorf("failed to compute relative path for job %s: %v", name, err) } // Metadata for the job jobsMetadata[name] = &metadata.Job{ diff --git a/bundle/deploy/metadata/compute_test.go b/bundle/deploy/metadata/compute_test.go index c3cb029d1..6d43f845b 100644 --- a/bundle/deploy/metadata/compute_test.go +++ b/bundle/deploy/metadata/compute_test.go @@ -6,8 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/paths" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/bundletest" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/assert" @@ -30,23 +30,18 @@ func TestComputeMetadataMutator(t *testing.T) { OriginURL: "www.host.com", Commit: "abcd", BundleRootPath: "a/b/c/d", + Inferred: true, }, }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ "my-job-1": { - Paths: paths.Paths{ - ConfigFilePath: "a/b/c", - }, ID: "1111", JobSettings: &jobs.JobSettings{ Name: "My Job One", }, }, "my-job-2": { - Paths: paths.Paths{ - ConfigFilePath: "d/e/f", - }, ID: "2222", JobSettings: &jobs.JobSettings{ Name: "My Job Two", @@ -54,16 +49,16 @@ func TestComputeMetadataMutator(t *testing.T) { }, }, Pipelines: map[string]*resources.Pipeline{ - "my-pipeline": { - Paths: paths.Paths{ - ConfigFilePath: "abc", - }, - }, + "my-pipeline": {}, }, }, }, } + bundletest.SetLocation(b, "resources.jobs.my-job-1", "a/b/c") + bundletest.SetLocation(b, "resources.jobs.my-job-2", "d/e/f") + bundletest.SetLocation(b, "resources.pipelines.my-pipeline", "abc") + expectedMetadata := metadata.Metadata{ Version: metadata.Version, Config: metadata.Config{ @@ -76,6 +71,9 @@ func TestComputeMetadataMutator(t *testing.T) { OriginURL: "www.host.com", Commit: "abcd", BundleRootPath: "a/b/c/d", + + // Test that this field doesn't carry over into the metadata. 
+ Inferred: false, }, }, Resources: metadata.Resources{ @@ -93,8 +91,8 @@ func TestComputeMetadataMutator(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, Compute()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Compute()) + require.NoError(t, diags.Error()) assert.Equal(t, expectedMetadata, b.Metadata) } diff --git a/bundle/deploy/metadata/upload.go b/bundle/deploy/metadata/upload.go index f550a66e7..ee87816de 100644 --- a/bundle/deploy/metadata/upload.go +++ b/bundle/deploy/metadata/upload.go @@ -4,12 +4,18 @@ import ( "bytes" "context" "encoding/json" + "path" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" ) -const MetadataFileName = "metadata.json" +const metadataFileName = "metadata.json" + +func metadataFilePath(b *bundle.Bundle) string { + return path.Join(b.Config.Workspace.StatePath, metadataFileName) +} type upload struct{} @@ -21,16 +27,16 @@ func (m *upload) Name() string { return "metadata.Upload" } -func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) if err != nil { - return err + return diag.FromErr(err) } metadata, err := json.MarshalIndent(b.Metadata, "", " ") if err != nil { - return err + return diag.FromErr(err) } - return f.Write(ctx, MetadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists) + return diag.FromErr(f.Write(ctx, metadataFileName, bytes.NewReader(metadata), filer.CreateParentDirectories, filer.OverwriteIfExists)) } diff --git a/bundle/deploy/state.go b/bundle/deploy/state.go new file mode 100644 index 000000000..4f2bc4ee4 --- /dev/null +++ b/bundle/deploy/state.go @@ -0,0 +1,183 @@ +package deploy + +import ( + "context" + "encoding/json" + "fmt" + "io" + "io/fs" + "path/filepath" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" + "github.com/google/uuid" +) + +const DeploymentStateFileName = "deployment.json" +const DeploymentStateVersion = 1 + +type File struct { + LocalPath string `json:"local_path"` + + // If true, this file is a notebook. + // This property must be persisted because notebooks are stripped of their extension. + // If the local file is no longer present, we need to know what to remove on the workspace side. + IsNotebook bool `json:"is_notebook"` +} + +type Filelist []File + +type DeploymentState struct { + // Version is the version of the deployment state. + // To be incremented when the schema changes. + Version int64 `json:"version"` + + // Seq is the sequence number of the deployment state. + // This number is incremented on every deployment. + // It is used to detect if the deployment state is stale. + Seq int64 `json:"seq"` + + // CliVersion is the version of the CLI which created the deployment state. + CliVersion string `json:"cli_version"` + + // Timestamp is the time when the deployment state was created. + Timestamp time.Time `json:"timestamp"` + + // Files is a list of files which has been deployed as part of this deployment. + Files Filelist `json:"files"` + + // UUID uniquely identifying the deployment. + ID uuid.UUID `json:"id"` +} + +// We use this entry type as a proxy to fs.DirEntry. 
+// When we construct sync snapshot from deployment state, +// we use a fileset.File which embeds fs.DirEntry as the DirEntry field. +// Because we can't marshal/unmarshal fs.DirEntry directly, instead when we unmarshal +// the deployment state, we use this entry type to represent the fs.DirEntry in fileset.File instance. +type entry struct { + path string + info fs.FileInfo +} + +func newEntry(root vfs.Path, path string) *entry { + info, err := root.Stat(path) + if err != nil { + return &entry{path, nil} + } + + return &entry{path, info} +} + +func (e *entry) Name() string { + return filepath.Base(e.path) +} + +func (e *entry) IsDir() bool { + // If the entry is nil, it is a non-existent file so return false. + if e.info == nil { + return false + } + return e.info.IsDir() +} + +func (e *entry) Type() fs.FileMode { + // If the entry is nil, it is a non-existent file so return 0. + if e.info == nil { + return 0 + } + return e.info.Mode() +} + +func (e *entry) Info() (fs.FileInfo, error) { + if e.info == nil { + return nil, fmt.Errorf("no info available") + } + return e.info, nil +} + +func FromSlice(files []fileset.File) (Filelist, error) { + var f Filelist + for k := range files { + file := &files[k] + isNotebook, err := file.IsNotebook() + if err != nil { + return nil, err + } + f = append(f, File{ + LocalPath: file.Relative, + IsNotebook: isNotebook, + }) + } + return f, nil +} + +func (f Filelist) ToSlice(root vfs.Path) []fileset.File { + var files []fileset.File + for _, file := range f { + entry := newEntry(root, filepath.ToSlash(file.LocalPath)) + + // Snapshots created with versions <= v0.220.0 use platform-specific + // paths (i.e. with backslashes). Files returned by [libs/fileset] always + // contain forward slashes after this version. Normalize before using. + relative := filepath.ToSlash(file.LocalPath) + if file.IsNotebook { + files = append(files, fileset.NewNotebookFile(root, entry, relative)) + } else { + files = append(files, fileset.NewSourceFile(root, entry, relative)) + } + } + return files +} + +func isLocalStateStale(local io.Reader, remote io.Reader) bool { + localState, err := loadState(local) + if err != nil { + return true + } + + remoteState, err := loadState(remote) + if err != nil { + return false + } + + return localState.Seq < remoteState.Seq +} + +func validateRemoteStateCompatibility(remote io.Reader) error { + state, err := loadState(remote) + if err != nil { + return err + } + + // If the remote state version is greater than the CLI version, we can't proceed. 
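+	// state.CliVersion is the version of the CLI that wrote the remote state, so the
+	// error below tells the user the minimum version they need to upgrade to.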
+ if state.Version > DeploymentStateVersion { + return fmt.Errorf("remote deployment state is incompatible with the current version of the CLI, please upgrade to at least %s", state.CliVersion) + } + + return nil +} + +func loadState(r io.Reader) (*DeploymentState, error) { + content, err := io.ReadAll(r) + if err != nil { + return nil, err + } + var s DeploymentState + err = json.Unmarshal(content, &s) + if err != nil { + return nil, err + } + + return &s, nil +} + +func getPathToStateFile(ctx context.Context, b *bundle.Bundle) (string, error) { + cacheDir, err := b.CacheDir(ctx) + if err != nil { + return "", fmt.Errorf("cannot get bundle cache directory: %w", err) + } + return filepath.Join(cacheDir, DeploymentStateFileName), nil +} diff --git a/bundle/deploy/state_pull.go b/bundle/deploy/state_pull.go new file mode 100644 index 000000000..24ed9d360 --- /dev/null +++ b/bundle/deploy/state_pull.go @@ -0,0 +1,127 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/fs" + "os" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/sync" +) + +type statePull struct { + filerFactory FilerFactory +} + +func (s *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + f, err := s.filerFactory(b) + if err != nil { + return diag.FromErr(err) + } + + // Download deployment state file from filer to local cache directory. + log.Infof(ctx, "Opening remote deployment state file") + remote, err := s.remoteState(ctx, f) + if err != nil { + log.Infof(ctx, "Unable to open remote deployment state file: %s", err) + return diag.FromErr(err) + } + if remote == nil { + log.Infof(ctx, "Remote deployment state file does not exist") + return nil + } + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + local, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR, 0600) + if err != nil { + return diag.FromErr(err) + } + defer local.Close() + + data := remote.Bytes() + err = validateRemoteStateCompatibility(bytes.NewReader(data)) + if err != nil { + return diag.FromErr(err) + } + + if !isLocalStateStale(local, bytes.NewReader(data)) { + log.Infof(ctx, "Local deployment state is the same or newer, ignoring remote state") + return nil + } + + // Truncating the file before writing + local.Truncate(0) + local.Seek(0, 0) + + // Write file to disk. + log.Infof(ctx, "Writing remote deployment state file to local cache directory") + _, err = io.Copy(local, bytes.NewReader(data)) + if err != nil { + return diag.FromErr(err) + } + + var state DeploymentState + err = json.Unmarshal(data, &state) + if err != nil { + return diag.FromErr(err) + } + + // Create a new snapshot based on the deployment state file. + opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) + if err != nil { + return diag.FromErr(err) + } + + log.Infof(ctx, "Creating new snapshot") + snapshot, err := sync.NewSnapshot(state.Files.ToSlice(b.BundleRoot), opts) + if err != nil { + return diag.FromErr(err) + } + + // Persist the snapshot to disk. + log.Infof(ctx, "Persisting snapshot to disk") + return diag.FromErr(snapshot.Save(ctx)) +} + +func (s *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buffer, error) { + // Download deployment state file from filer to local cache directory. 
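+	// The remote content is buffered in memory here; Apply writes it to the local
+	// cache directory only after checking version compatibility and staleness.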
+ remote, err := f.Read(ctx, DeploymentStateFileName) + if err != nil { + // On first deploy this file doesn't yet exist. + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + + defer remote.Close() + + var buf bytes.Buffer + _, err = io.Copy(&buf, remote) + if err != nil { + return nil, err + } + + return &buf, nil +} + +func (s *statePull) Name() string { + return "deploy:state-pull" +} + +// StatePull returns a mutator that pulls the deployment state from the Databricks workspace +func StatePull() bundle.Mutator { + return &statePull{StateFiler} +} diff --git a/bundle/deploy/state_pull_test.go b/bundle/deploy/state_pull_test.go new file mode 100644 index 000000000..38f0b4021 --- /dev/null +++ b/bundle/deploy/state_pull_test.go @@ -0,0 +1,461 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/fs" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/deploy/files" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +type snapshortStateExpectations struct { + localToRemoteNames map[string]string + remoteToLocalNames map[string]string +} + +type statePullExpectations struct { + seq int + filesInDevelopmentState []File + snapshotState *snapshortStateExpectations +} + +type statePullOpts struct { + files []File + seq int + localFiles []string + localNotebooks []string + expects statePullExpectations + withExistingSnapshot bool + localState *DeploymentState +} + +func testStatePull(t *testing.T, opts statePullOpts) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + deploymentStateData, err := json.Marshal(DeploymentState{ + Version: DeploymentStateVersion, + Seq: int64(opts.seq), + Files: opts.files, + }) + require.NoError(t, err) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(io.NopCloser(bytes.NewReader(deploymentStateData)), nil) + + return f, nil + }} + + tmpDir := t.TempDir() + b := &bundle.Bundle{ + RootPath: tmpDir, + BundleRoot: vfs.MustNew(tmpDir), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + }, + }, + } + ctx := context.Background() + + for _, file := range opts.localFiles { + testutil.Touch(t, b.RootPath, "bar", file) + } + + for _, file := range opts.localNotebooks { + testutil.TouchNotebook(t, b.RootPath, "bar", file) + } + + if opts.withExistingSnapshot { + opts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) + require.NoError(t, err) + + snapshotPath, err := sync.SnapshotPath(opts) + require.NoError(t, err) + + err = os.WriteFile(snapshotPath, []byte("snapshot"), 0644) + require.NoError(t, err) + } + + if opts.localState != nil { + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + data, err := json.Marshal(opts.localState) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + } + + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) + + // Check that deployment state was written + 
statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + data, err := os.ReadFile(statePath) + require.NoError(t, err) + + var state DeploymentState + err = json.Unmarshal(data, &state) + require.NoError(t, err) + + require.Equal(t, int64(opts.expects.seq), state.Seq) + require.Len(t, state.Files, len(opts.expects.filesInDevelopmentState)) + for i, file := range opts.expects.filesInDevelopmentState { + require.Equal(t, file.LocalPath, state.Files[i].LocalPath) + } + + if opts.expects.snapshotState != nil { + syncOpts, err := files.GetSyncOptions(ctx, bundle.ReadOnly(b)) + require.NoError(t, err) + + snapshotPath, err := sync.SnapshotPath(syncOpts) + require.NoError(t, err) + + _, err = os.Stat(snapshotPath) + require.NoError(t, err) + + data, err = os.ReadFile(snapshotPath) + require.NoError(t, err) + + var snapshot sync.Snapshot + err = json.Unmarshal(data, &snapshot) + require.NoError(t, err) + + snapshotState := snapshot.SnapshotState + require.Len(t, snapshotState.LocalToRemoteNames, len(opts.expects.snapshotState.localToRemoteNames)) + for local, remote := range opts.expects.snapshotState.localToRemoteNames { + require.Equal(t, remote, snapshotState.LocalToRemoteNames[local]) + } + + require.Len(t, snapshotState.RemoteToLocalNames, len(opts.expects.snapshotState.remoteToLocalNames)) + for remote, local := range opts.expects.snapshotState.remoteToLocalNames { + require.Equal(t, local, snapshotState.RemoteToLocalNames[remote]) + } + } +} + +var stateFiles []File = []File{ + { + LocalPath: "bar/t1.py", + IsNotebook: false, + }, + { + LocalPath: "bar/t2.py", + IsNotebook: false, + }, + { + LocalPath: "bar/notebook.py", + IsNotebook: true, + }, +} + +func TestStatePull(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullSnapshotExists(t *testing.T) { + testStatePull(t, statePullOpts{ + withExistingSnapshot: true, + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullNoState(t *testing.T) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(nil, os.ErrNotExist) + + return f, nil + }} + + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + 
StatePath: "/state", + }, + }, + } + ctx := context.Background() + + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) + + // Check that deployment state was not written + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + _, err = os.Stat(statePath) + require.True(t, errors.Is(err, fs.ErrNotExist)) +} + +func TestStatePullOlderState(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + localState: &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 2, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + expects: statePullExpectations{ + seq: 2, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + }) +} + +func TestStatePullNewerState(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{"notebook.py"}, + localState: &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 0, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + }, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullAndFileIsRemovedLocally(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t2.py"}, // t1.py is removed locally + localNotebooks: []string{"notebook.py"}, + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullAndNotebookIsRemovedLocally(t *testing.T) { + testStatePull(t, statePullOpts{ + seq: 1, + files: stateFiles, + localFiles: []string{"t1.py", "t2.py"}, + localNotebooks: []string{}, // notebook.py is removed locally + expects: statePullExpectations{ + seq: 1, + filesInDevelopmentState: []File{ + { + LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + { + LocalPath: "bar/notebook.py", + }, + }, + snapshotState: &snapshortStateExpectations{ + localToRemoteNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook.py": "bar/notebook", + }, + remoteToLocalNames: map[string]string{ + "bar/t1.py": "bar/t1.py", + "bar/t2.py": "bar/t2.py", + "bar/notebook": "bar/notebook.py", + }, + }, + }, + }) +} + +func TestStatePullNewerDeploymentStateVersion(t *testing.T) { + s := &statePull{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + deploymentStateData, err := json.Marshal(DeploymentState{ + Version: DeploymentStateVersion + 1, + Seq: 1, + CliVersion: "1.2.3", + Files: []File{ + { + 
LocalPath: "bar/t1.py", + }, + { + LocalPath: "bar/t2.py", + }, + }, + }) + require.NoError(t, err) + + f.EXPECT().Read(mock.Anything, DeploymentStateFileName).Return(io.NopCloser(bytes.NewReader(deploymentStateData)), nil) + + return f, nil + }} + + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + }, + }, + } + ctx := context.Background() + + diags := bundle.Apply(ctx, b, s) + require.True(t, diags.HasError()) + require.ErrorContains(t, diags.Error(), "remote deployment state is incompatible with the current version of the CLI, please upgrade to at least 1.2.3") +} diff --git a/bundle/deploy/state_push.go b/bundle/deploy/state_push.go new file mode 100644 index 000000000..176a907c8 --- /dev/null +++ b/bundle/deploy/state_push.go @@ -0,0 +1,50 @@ +package deploy + +import ( + "context" + "os" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/filer" + "github.com/databricks/cli/libs/log" +) + +type statePush struct { + filerFactory FilerFactory +} + +func (s *statePush) Name() string { + return "deploy:state-push" +} + +func (s *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + f, err := s.filerFactory(b) + if err != nil { + return diag.FromErr(err) + } + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + local, err := os.Open(statePath) + if err != nil { + return diag.FromErr(err) + } + defer local.Close() + + log.Infof(ctx, "Writing local deployment state file to remote state directory") + err = f.Write(ctx, DeploymentStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +// StatePush returns a mutator that pushes the deployment state file to Databricks workspace. 
+func StatePush() bundle.Mutator { + return &statePush{StateFiler} +} diff --git a/bundle/deploy/state_push_test.go b/bundle/deploy/state_push_test.go new file mode 100644 index 000000000..39e4d13a5 --- /dev/null +++ b/bundle/deploy/state_push_test.go @@ -0,0 +1,82 @@ +package deploy + +import ( + "context" + "encoding/json" + "io" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestStatePush(t *testing.T) { + s := &statePush{func(b *bundle.Bundle) (filer.Filer, error) { + f := mockfiler.NewMockFiler(t) + + f.EXPECT().Write(mock.Anything, DeploymentStateFileName, mock.MatchedBy(func(r *os.File) bool { + bytes, err := io.ReadAll(r) + if err != nil { + return false + } + + var state DeploymentState + err = json.Unmarshal(bytes, &state) + if err != nil { + return false + } + + if state.Seq != 1 { + return false + } + + if len(state.Files) != 1 { + return false + } + + return true + }), filer.CreateParentDirectories, filer.OverwriteIfExists).Return(nil) + return f, nil + }} + + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + }, + }, + } + + ctx := context.Background() + + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + state := DeploymentState{ + Version: DeploymentStateVersion, + Seq: 1, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + } + + data, err := json.Marshal(state) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) +} diff --git a/bundle/deploy/state_test.go b/bundle/deploy/state_test.go new file mode 100644 index 000000000..5e1e54230 --- /dev/null +++ b/bundle/deploy/state_test.go @@ -0,0 +1,73 @@ +package deploy + +import ( + "bytes" + "encoding/json" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" + "github.com/stretchr/testify/require" +) + +func TestFromSlice(t *testing.T) { + tmpDir := t.TempDir() + fileset := fileset.New(vfs.MustNew(tmpDir)) + testutil.Touch(t, tmpDir, "test1.py") + testutil.Touch(t, tmpDir, "test2.py") + testutil.Touch(t, tmpDir, "test3.py") + + files, err := fileset.All() + require.NoError(t, err) + + f, err := FromSlice(files) + require.NoError(t, err) + require.Len(t, f, 3) + + for _, file := range f { + require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.LocalPath) + } +} + +func TestToSlice(t *testing.T) { + tmpDir := t.TempDir() + root := vfs.MustNew(tmpDir) + fileset := fileset.New(root) + testutil.Touch(t, tmpDir, "test1.py") + testutil.Touch(t, tmpDir, "test2.py") + testutil.Touch(t, tmpDir, "test3.py") + + files, err := fileset.All() + require.NoError(t, err) + + f, err := FromSlice(files) + require.NoError(t, err) + require.Len(t, f, 3) + + s := f.ToSlice(root) + require.Len(t, s, 3) + + for _, file := range s { + require.Contains(t, []string{"test1.py", "test2.py", "test3.py"}, file.Relative) + + // If the mtime is not zero we know we produced a valid fs.DirEntry. 
+ ts := file.Modified() + require.NotZero(t, ts) + } +} + +func TestIsLocalStateStale(t *testing.T) { + oldState, err := json.Marshal(DeploymentState{ + Seq: 1, + }) + require.NoError(t, err) + + newState, err := json.Marshal(DeploymentState{ + Seq: 2, + }) + require.NoError(t, err) + + require.True(t, isLocalStateStale(bytes.NewReader(oldState), bytes.NewReader(newState))) + require.False(t, isLocalStateStale(bytes.NewReader(newState), bytes.NewReader(oldState))) +} diff --git a/bundle/deploy/state_update.go b/bundle/deploy/state_update.go new file mode 100644 index 000000000..9ab1bacf1 --- /dev/null +++ b/bundle/deploy/state_update.go @@ -0,0 +1,105 @@ +package deploy + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "io" + "io/fs" + "os" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/log" + "github.com/google/uuid" +) + +type stateUpdate struct { +} + +func (s *stateUpdate) Name() string { + return "deploy:state-update" +} + +func (s *stateUpdate) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + state, err := load(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + // Increment the state sequence. + state.Seq = state.Seq + 1 + + // Update timestamp. + state.Timestamp = time.Now().UTC() + + // Update the CLI version and deployment state version. + state.CliVersion = build.GetInfo().Version + state.Version = DeploymentStateVersion + + // Update the state with the current list of synced files. + fl, err := FromSlice(b.Files) + if err != nil { + return diag.FromErr(err) + } + state.Files = fl + + // Generate a UUID for the deployment, if one does not already exist + if state.ID == uuid.Nil { + state.ID = uuid.New() + } + + statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return diag.FromErr(err) + } + // Write the state back to the file. + f, err := os.OpenFile(statePath, os.O_CREATE|os.O_RDWR|os.O_TRUNC, 0600) + if err != nil { + log.Infof(ctx, "Unable to open deployment state file: %s", err) + return diag.FromErr(err) + } + defer f.Close() + + data, err := json.Marshal(state) + if err != nil { + return diag.FromErr(err) + } + + _, err = io.Copy(f, bytes.NewReader(data)) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +func StateUpdate() bundle.Mutator { + return &stateUpdate{} +} + +func load(ctx context.Context, b *bundle.Bundle) (*DeploymentState, error) { + // If the file does not exist, return a new DeploymentState. 
+ statePath, err := getPathToStateFile(ctx, b) + if err != nil { + return nil, err + } + + log.Infof(ctx, "Loading deployment state from %s", statePath) + f, err := os.Open(statePath) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + log.Infof(ctx, "No deployment state file found") + return &DeploymentState{ + Version: DeploymentStateVersion, + CliVersion: build.GetInfo().Version, + }, nil + } + return nil, err + } + defer f.Close() + return loadState(f) +} diff --git a/bundle/deploy/state_update_test.go b/bundle/deploy/state_update_test.go new file mode 100644 index 000000000..2982546d5 --- /dev/null +++ b/bundle/deploy/state_update_test.go @@ -0,0 +1,146 @@ +package deploy + +import ( + "context" + "encoding/json" + "os" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/build" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func setupBundleForStateUpdate(t *testing.T) *bundle.Bundle { + tmpDir := t.TempDir() + + testutil.Touch(t, tmpDir, "test1.py") + testutil.TouchNotebook(t, tmpDir, "test2.py") + + files, err := fileset.New(vfs.MustNew(tmpDir)).All() + require.NoError(t, err) + + return &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "default", + }, + Workspace: config.Workspace{ + StatePath: "/state", + FilePath: "/files", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + }, + }, + Files: files, + } +} + +func TestStateUpdate(t *testing.T) { + s := &stateUpdate{} + + b := setupBundleForStateUpdate(t) + ctx := context.Background() + + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) + + // Check that the state file was updated. + state, err := load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(1), state.Seq) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) + require.Equal(t, build.GetInfo().Version, state.CliVersion) + + diags = bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) + + // Check that the state file was updated again. + state, err = load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(2), state.Seq) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) + require.Equal(t, build.GetInfo().Version, state.CliVersion) + + // Valid non-empty UUID is generated. + require.NotEqual(t, uuid.Nil, state.ID) +} + +func TestStateUpdateWithExistingState(t *testing.T) { + s := &stateUpdate{} + + b := setupBundleForStateUpdate(t) + ctx := context.Background() + + // Create an existing state file. + statePath, err := getPathToStateFile(ctx, b) + require.NoError(t, err) + + state := &DeploymentState{ + Version: DeploymentStateVersion, + Seq: 10, + CliVersion: build.GetInfo().Version, + Files: []File{ + { + LocalPath: "bar/t1.py", + }, + }, + ID: uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"), + } + + data, err := json.Marshal(state) + require.NoError(t, err) + + err = os.WriteFile(statePath, data, 0644) + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, s) + require.NoError(t, diags.Error()) + + // Check that the state file was updated. 
+ state, err = load(ctx, b) + require.NoError(t, err) + + require.Equal(t, int64(11), state.Seq) + require.Equal(t, state.Files, Filelist{ + { + LocalPath: "test1.py", + }, + { + LocalPath: "test2.py", + IsNotebook: true, + }, + }) + require.Equal(t, build.GetInfo().Version, state.CliVersion) + + // Existing UUID is not overwritten. + require.Equal(t, uuid.MustParse("123e4567-e89b-12d3-a456-426614174000"), state.ID) +} diff --git a/bundle/deploy/terraform/apply.go b/bundle/deploy/terraform/apply.go index 117cdfc18..e4acda852 100644 --- a/bundle/deploy/terraform/apply.go +++ b/bundle/deploy/terraform/apply.go @@ -2,10 +2,10 @@ package terraform import ( "context" - "fmt" "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -16,22 +16,22 @@ func (w *apply) Name() string { return "terraform.Apply" } -func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error { +func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } cmdio.LogString(ctx, "Deploying resources...") err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } err = tf.Apply(ctx) if err != nil { - return fmt.Errorf("terraform apply: %w", err) + return diag.Errorf("terraform apply: %v", err) } log.Infof(ctx, "Resource deployment completed") diff --git a/bundle/deploy/terraform/check_running_resources.go b/bundle/deploy/terraform/check_running_resources.go new file mode 100644 index 000000000..737f773e5 --- /dev/null +++ b/bundle/deploy/terraform/check_running_resources.go @@ -0,0 +1,129 @@ +package terraform + +import ( + "context" + "fmt" + "strconv" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + tfjson "github.com/hashicorp/terraform-json" + "golang.org/x/sync/errgroup" +) + +type ErrResourceIsRunning struct { + resourceType string + resourceId string +} + +func (e ErrResourceIsRunning) Error() string { + return fmt.Sprintf("%s %s is running", e.resourceType, e.resourceId) +} + +type checkRunningResources struct { +} + +func (l *checkRunningResources) Name() string { + return "check-running-resources" +} + +func (l *checkRunningResources) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + if !b.Config.Bundle.Deployment.FailOnActiveRuns { + return nil + } + + state, err := ParseResourcesState(ctx, b) + if err != nil && state == nil { + return diag.FromErr(err) + } + + w := b.WorkspaceClient() + err = checkAnyResourceRunning(ctx, w, state) + if err != nil { + return diag.FromErr(err) + } + return nil +} + +func CheckRunningResource() *checkRunningResources { + return &checkRunningResources{} +} + +func checkAnyResourceRunning(ctx context.Context, w *databricks.WorkspaceClient, state *resourcesState) error { + if state == nil { + return nil + } + + errs, errCtx := errgroup.WithContext(ctx) + + for _, resource := range state.Resources { + if resource.Mode != tfjson.ManagedResourceMode { + continue + } + for _, instance := range resource.Instances { + id := instance.Attributes.ID + if id == "" { + continue + } + + switch 
resource.Type { + case "databricks_job": + errs.Go(func() error { + isRunning, err := IsJobRunning(errCtx, w, id) + // If we fail to retrieve the job's active runs, return the error + if err != nil { + return err + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "job", resourceId: id} + } + return nil + }) + case "databricks_pipeline": + errs.Go(func() error { + isRunning, err := IsPipelineRunning(errCtx, w, id) + // If there's an error retrieving the pipeline, we assume it's not running + if err != nil { + return nil + } + if isRunning { + return &ErrResourceIsRunning{resourceType: "pipeline", resourceId: id} + } + return nil + }) + } + } + } + + return errs.Wait() +} + +func IsJobRunning(ctx context.Context, w *databricks.WorkspaceClient, jobId string) (bool, error) { + id, err := strconv.Atoi(jobId) + if err != nil { + return false, err + } + + runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{JobId: int64(id), ActiveOnly: true}) + if err != nil { + return false, err + } + + return len(runs) > 0, nil +} + +func IsPipelineRunning(ctx context.Context, w *databricks.WorkspaceClient, pipelineId string) (bool, error) { + resp, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{PipelineId: pipelineId}) + if err != nil { + return false, err + } + switch resp.State { + case pipelines.PipelineStateIdle, pipelines.PipelineStateFailed, pipelines.PipelineStateDeleted: + return false, nil + default: + return true, nil + } +} diff --git a/bundle/deploy/terraform/check_running_resources_test.go b/bundle/deploy/terraform/check_running_resources_test.go new file mode 100644 index 000000000..a1bbbd37b --- /dev/null +++ b/bundle/deploy/terraform/check_running_resources_test.go @@ -0,0 +1,114 @@ +package terraform + +import ( + "context" + "errors" + "testing" + + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestIsAnyResourceRunningWithEmptyState(t *testing.T) { + mock := mocks.NewMockWorkspaceClient(t) + err := checkAnyResourceRunning(context.Background(), mock.WorkspaceClient, &resourcesState{}) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithJob(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "job1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, + }, + } + + jobsApi := m.GetMockJobsAPI() + jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + JobId: 123, + ActiveOnly: true, + }).Return([]jobs.BaseRun{ + {RunId: 1234}, + }, nil).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) + require.ErrorContains(t, err, "job 123 is running") + + jobsApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + JobId: 123, + ActiveOnly: true, + }).Return([]jobs.BaseRun{}, nil).Once() + + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithPipeline(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "pipeline1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: 
"123"}}, + }, + }, + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(&pipelines.GetPipelineResponse{ + PipelineId: "123", + State: pipelines.PipelineStateRunning, + }, nil).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) + require.ErrorContains(t, err, "pipeline 123 is running") + + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(&pipelines.GetPipelineResponse{ + PipelineId: "123", + State: pipelines.PipelineStateIdle, + }, nil).Once() + err = checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) + require.NoError(t, err) +} + +func TestIsAnyResourceRunningWithAPIFailure(t *testing.T) { + m := mocks.NewMockWorkspaceClient(t) + resources := &resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "pipeline1", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Get(mock.Anything, pipelines.GetPipelineRequest{ + PipelineId: "123", + }).Return(nil, errors.New("API failure")).Once() + + err := checkAnyResourceRunning(context.Background(), m.WorkspaceClient, resources) + require.NoError(t, err) +} diff --git a/bundle/deploy/terraform/convert.go b/bundle/deploy/terraform/convert.go index 6723caee3..a6ec04d9a 100644 --- a/bundle/deploy/terraform/convert.go +++ b/bundle/deploy/terraform/convert.go @@ -1,13 +1,15 @@ package terraform import ( + "context" "encoding/json" "fmt" - "reflect" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/deploy/terraform/tfdyn" "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" tfjson "github.com/hashicorp/terraform-json" ) @@ -16,15 +18,6 @@ func conv(from any, to any) { json.Unmarshal(buf, &to) } -func convRemoteToLocal(remote any, local any) resources.ModifiedStatus { - var modifiedStatus resources.ModifiedStatus - if reflect.ValueOf(local).Elem().IsNil() { - modifiedStatus = resources.ModifiedStatusDeleted - } - conv(remote, local) - return modifiedStatus -} - func convPermissions(acl []resources.Permission) *schema.ResourcePermissions { if len(acl) == 0 { return nil @@ -97,6 +90,16 @@ func BundleToTerraform(config *config.Root) *schema.Root { t.Library = append(t.Library, l) } + // Convert for_each_task libraries + if v.ForEachTask != nil { + for _, v_ := range v.ForEachTask.Task.Libraries { + var l schema.ResourceJobTaskForEachTaskTaskLibrary + conv(v_, &l) + t.ForEachTask.Task.Library = append(t.ForEachTask.Task.Library, l) + } + + } + dst.Task = append(dst.Task, t) } @@ -219,6 +222,13 @@ func BundleToTerraform(config *config.Root) *schema.Root { } } + for k, src := range config.Resources.QualityMonitors { + noResources = false + var dst schema.ResourceQualityMonitor + conv(src, &dst) + tfroot.Resource.QualityMonitor[k] = &dst + } + // We explicitly set "resource" to nil to omit it from a JSON encoding. // This is required because the terraform CLI requires >= 1 resources defined // if the "resource" property is used in a .tf.json file. 
@@ -228,76 +238,150 @@ func BundleToTerraform(config *config.Root) *schema.Root { return tfroot } -func TerraformToBundle(state *tfjson.State, config *config.Root) error { - if state.Values != nil && state.Values.RootModule != nil { - for _, resource := range state.Values.RootModule.Resources { - // Limit to resources. - if resource.Mode != tfjson.ManagedResourceMode { - continue - } +// BundleToTerraformWithDynValue converts resources in a bundle configuration +// to the equivalent Terraform JSON representation. +func BundleToTerraformWithDynValue(ctx context.Context, root dyn.Value) (*schema.Root, error) { + tfroot := schema.NewRoot() + tfroot.Provider = schema.NewProviders() + // Convert each resource in the bundle to the equivalent Terraform representation. + dynResources, err := dyn.Get(root, "resources") + if err != nil { + // If the resources key is missing, return an empty root. + if dyn.IsNoSuchKeyError(err) { + return tfroot, nil + } + return nil, err + } + + tfroot.Resource = schema.NewResources() + + numResources := 0 + _, err = dyn.Walk(dynResources, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if len(p) < 2 { + return v, nil + } + + // Skip resources that have been deleted locally. + modifiedStatus, err := dyn.Get(v, "modified_status") + if err == nil { + modifiedStatusStr, ok := modifiedStatus.AsString() + if ok && modifiedStatusStr == resources.ModifiedStatusDeleted { + return v, dyn.ErrSkip + } + } + + typ := p[0].Key() + key := p[1].Key() + + // Lookup the converter based on the resource type. + c, ok := tfdyn.GetConverter(typ) + if !ok { + return dyn.InvalidValue, fmt.Errorf("no converter for resource type %s", typ) + } + + // Convert resource to Terraform representation. + err = c.Convert(ctx, key, v, tfroot.Resource) + if err != nil { + return dyn.InvalidValue, err + } + + numResources++ + + // Skip traversal of the resource itself. + return v, dyn.ErrSkip + }) + if err != nil { + return nil, err + } + + // We explicitly set "resource" to nil to omit it from a JSON encoding. + // This is required because the terraform CLI requires >= 1 resources defined + // if the "resource" property is used in a .tf.json file. 
+ if numResources == 0 { + tfroot.Resource = nil + } + + return tfroot, nil +} + +func TerraformToBundle(state *resourcesState, config *config.Root) error { + for _, resource := range state.Resources { + if resource.Mode != tfjson.ManagedResourceMode { + continue + } + for _, instance := range resource.Instances { switch resource.Type { case "databricks_job": - var tmp schema.ResourceJob - conv(resource.AttributeValues, &tmp) if config.Resources.Jobs == nil { config.Resources.Jobs = make(map[string]*resources.Job) } cur := config.Resources.Jobs[resource.Name] - // TODO: make sure we can unmarshall tf state properly and don't swallow errors - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.Job{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Jobs[resource.Name] = cur case "databricks_pipeline": - var tmp schema.ResourcePipeline - conv(resource.AttributeValues, &tmp) if config.Resources.Pipelines == nil { config.Resources.Pipelines = make(map[string]*resources.Pipeline) } cur := config.Resources.Pipelines[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.Pipeline{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Pipelines[resource.Name] = cur case "databricks_mlflow_model": - var tmp schema.ResourceMlflowModel - conv(resource.AttributeValues, &tmp) if config.Resources.Models == nil { config.Resources.Models = make(map[string]*resources.MlflowModel) } cur := config.Resources.Models[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.MlflowModel{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Models[resource.Name] = cur case "databricks_mlflow_experiment": - var tmp schema.ResourceMlflowExperiment - conv(resource.AttributeValues, &tmp) if config.Resources.Experiments == nil { config.Resources.Experiments = make(map[string]*resources.MlflowExperiment) } cur := config.Resources.Experiments[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.MlflowExperiment{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.Experiments[resource.Name] = cur case "databricks_model_serving": - var tmp schema.ResourceModelServing - conv(resource.AttributeValues, &tmp) if config.Resources.ModelServingEndpoints == nil { config.Resources.ModelServingEndpoints = make(map[string]*resources.ModelServingEndpoint) } cur := config.Resources.ModelServingEndpoints[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = &resources.ModelServingEndpoint{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.ModelServingEndpoints[resource.Name] = cur case "databricks_registered_model": - var tmp schema.ResourceRegisteredModel - conv(resource.AttributeValues, &tmp) if config.Resources.RegisteredModels == nil { config.Resources.RegisteredModels = make(map[string]*resources.RegisteredModel) } cur := config.Resources.RegisteredModels[resource.Name] - modifiedStatus := convRemoteToLocal(tmp, &cur) - cur.ModifiedStatus = modifiedStatus + if cur == nil { + cur = 
&resources.RegisteredModel{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID config.Resources.RegisteredModels[resource.Name] = cur + case "databricks_quality_monitor": + if config.Resources.QualityMonitors == nil { + config.Resources.QualityMonitors = make(map[string]*resources.QualityMonitor) + } + cur := config.Resources.QualityMonitors[resource.Name] + if cur == nil { + cur = &resources.QualityMonitor{ModifiedStatus: resources.ModifiedStatusDeleted} + } + cur.ID = instance.Attributes.ID + config.Resources.QualityMonitors[resource.Name] = cur case "databricks_permissions": case "databricks_grants": // Ignore; no need to pull these back into the configuration. @@ -337,6 +421,11 @@ func TerraformToBundle(state *tfjson.State, config *config.Root) error { src.ModifiedStatus = resources.ModifiedStatusCreated } } + for _, src := range config.Resources.QualityMonitors { + if src.ModifiedStatus == "" && src.ID == "" { + src.ModifiedStatus = resources.ModifiedStatusCreated + } + } return nil } diff --git a/bundle/deploy/terraform/convert_test.go b/bundle/deploy/terraform/convert_test.go index bb77f287b..7ea448538 100644 --- a/bundle/deploy/terraform/convert_test.go +++ b/bundle/deploy/terraform/convert_test.go @@ -1,18 +1,22 @@ package terraform import ( + "context" + "encoding/json" "reflect" "testing" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/ml" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/serving" - tfjson "github.com/hashicorp/terraform-json" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -24,7 +28,7 @@ func TestBundleToTerraformJob(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "key", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "10.4.x-scala2.12", }, }, @@ -55,13 +59,17 @@ func TestBundleToTerraformJob(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) - assert.Len(t, out.Resource.Job["my_job"].JobCluster, 1) - assert.Equal(t, "https://github.com/foo/bar", out.Resource.Job["my_job"].GitSource.Url) - assert.Len(t, out.Resource.Job["my_job"].Parameter, 2) - assert.Equal(t, "param1", out.Resource.Job["my_job"].Parameter[0].Name) - assert.Equal(t, "param2", out.Resource.Job["my_job"].Parameter[1].Name) + resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + + assert.Equal(t, "my job", resource.Name) + assert.Len(t, resource.JobCluster, 1) + assert.Equal(t, "https://github.com/foo/bar", resource.GitSource.Url) + assert.Len(t, resource.Parameter, 2) + assert.Equal(t, "param1", resource.Parameter[0].Name) + assert.Equal(t, "param2", resource.Parameter[1].Name) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobPermissions(t *testing.T) { @@ -83,12 +91,14 @@ func TestBundleToTerraformJobPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["job_my_job"].JobId) - assert.Len(t, 
out.Resource.Permissions["job_my_job"].AccessControl, 1) + resource := out.Resource.Permissions["job_my_job"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["job_my_job"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + assert.NotEmpty(t, resource.JobId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformJobTaskLibraries(t *testing.T) { @@ -119,10 +129,58 @@ func TestBundleToTerraformJobTaskLibraries(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "my job", out.Resource.Job["my_job"].Name) - require.Len(t, out.Resource.Job["my_job"].Task, 1) - require.Len(t, out.Resource.Job["my_job"].Task[0].Library, 1) - assert.Equal(t, "mlflow", out.Resource.Job["my_job"].Task[0].Library[0].Pypi.Package) + resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + + assert.Equal(t, "my job", resource.Name) + require.Len(t, resource.Task, 1) + require.Len(t, resource.Task[0].Library, 1) + assert.Equal(t, "mlflow", resource.Task[0].Library[0].Pypi.Package) + + bundleToTerraformEquivalenceTest(t, &config) +} + +func TestBundleToTerraformForEachTaskLibraries(t *testing.T) { + var src = resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "my job", + Tasks: []jobs.Task{ + { + TaskKey: "key", + ForEachTask: &jobs.ForEachTask{ + Inputs: "[1,2,3]", + Task: jobs.Task{ + TaskKey: "iteration", + Libraries: []compute.Library{ + { + Pypi: &compute.PythonPyPiLibrary{ + Package: "mlflow", + }, + }, + }, + }, + }, + }, + }, + }, + } + + var config = config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": &src, + }, + }, + } + + out := BundleToTerraform(&config) + resource := out.Resource.Job["my_job"].(*schema.ResourceJob) + + assert.Equal(t, "my job", resource.Name) + require.Len(t, resource.Task, 1) + require.Len(t, resource.Task[0].ForEachTask.Task.Library, 1) + assert.Equal(t, "mlflow", resource.Task[0].ForEachTask.Task.Library[0].Pypi.Package) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipeline(t *testing.T) { @@ -173,15 +231,18 @@ func TestBundleToTerraformPipeline(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "my pipeline", out.Resource.Pipeline["my_pipeline"].Name) - assert.Len(t, out.Resource.Pipeline["my_pipeline"].Library, 2) - notifs := out.Resource.Pipeline["my_pipeline"].Notification - assert.Len(t, notifs, 2) - assert.Equal(t, notifs[0].Alerts, []string{"on-update-fatal-failure"}) - assert.Equal(t, notifs[0].EmailRecipients, []string{"jane@doe.com"}) - assert.Equal(t, notifs[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) - assert.Equal(t, notifs[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) + resource := out.Resource.Pipeline["my_pipeline"].(*schema.ResourcePipeline) + + assert.Equal(t, "my pipeline", resource.Name) + assert.Len(t, resource.Library, 2) + assert.Len(t, resource.Notification, 2) + assert.Equal(t, resource.Notification[0].Alerts, []string{"on-update-fatal-failure"}) + assert.Equal(t, resource.Notification[0].EmailRecipients, []string{"jane@doe.com"}) + assert.Equal(t, resource.Notification[1].Alerts, []string{"on-update-failure", "on-flow-failure"}) + assert.Equal(t, resource.Notification[1].EmailRecipients, []string{"jane@doe.com", "john@doe.com"}) 
assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformPipelinePermissions(t *testing.T) { @@ -203,12 +264,14 @@ func TestBundleToTerraformPipelinePermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["pipeline_my_pipeline"].PipelineId) - assert.Len(t, out.Resource.Permissions["pipeline_my_pipeline"].AccessControl, 1) + resource := out.Resource.Permissions["pipeline_my_pipeline"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["pipeline_my_pipeline"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + assert.NotEmpty(t, resource.PipelineId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModel(t *testing.T) { @@ -238,18 +301,25 @@ func TestBundleToTerraformModel(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "name", out.Resource.MlflowModel["my_model"].Name) - assert.Equal(t, "description", out.Resource.MlflowModel["my_model"].Description) - assert.Len(t, out.Resource.MlflowModel["my_model"].Tags, 2) - assert.Equal(t, "k1", out.Resource.MlflowModel["my_model"].Tags[0].Key) - assert.Equal(t, "v1", out.Resource.MlflowModel["my_model"].Tags[0].Value) - assert.Equal(t, "k2", out.Resource.MlflowModel["my_model"].Tags[1].Key) - assert.Equal(t, "v2", out.Resource.MlflowModel["my_model"].Tags[1].Value) + resource := out.Resource.MlflowModel["my_model"].(*schema.ResourceMlflowModel) + + assert.Equal(t, "name", resource.Name) + assert.Equal(t, "description", resource.Description) + assert.Len(t, resource.Tags, 2) + assert.Equal(t, "k1", resource.Tags[0].Key) + assert.Equal(t, "v1", resource.Tags[0].Value) + assert.Equal(t, "k2", resource.Tags[1].Key) + assert.Equal(t, "v2", resource.Tags[1].Value) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelPermissions(t *testing.T) { var src = resources.MlflowModel{ + Model: &ml.Model{ + Name: "name", + }, Permissions: []resources.Permission{ { Level: "CAN_READ", @@ -267,12 +337,14 @@ func TestBundleToTerraformModelPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["mlflow_model_my_model"].RegisteredModelId) - assert.Len(t, out.Resource.Permissions["mlflow_model_my_model"].AccessControl, 1) + resource := out.Resource.Permissions["mlflow_model_my_model"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["mlflow_model_my_model"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_READ", p.PermissionLevel) + assert.NotEmpty(t, resource.RegisteredModelId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperiment(t *testing.T) { @@ -291,12 +363,19 @@ func TestBundleToTerraformExperiment(t *testing.T) { } out := BundleToTerraform(&config) - assert.Equal(t, "name", out.Resource.MlflowExperiment["my_experiment"].Name) + resource := out.Resource.MlflowExperiment["my_experiment"].(*schema.ResourceMlflowExperiment) + + assert.Equal(t, "name", resource.Name) assert.Nil(t, out.Data) + + 
bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformExperimentPermissions(t *testing.T) { var src = resources.MlflowExperiment{ + Experiment: &ml.Experiment{ + Name: "name", + }, Permissions: []resources.Permission{ { Level: "CAN_READ", @@ -314,13 +393,14 @@ func TestBundleToTerraformExperimentPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].ExperimentId) - assert.Len(t, out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl, 1) + resource := out.Resource.Permissions["mlflow_experiment_my_experiment"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["mlflow_experiment_my_experiment"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_READ", p.PermissionLevel) + assert.NotEmpty(t, resource.ExperimentId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_READ", resource.AccessControl[0].PermissionLevel) + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServing(t *testing.T) { @@ -357,7 +437,8 @@ func TestBundleToTerraformModelServing(t *testing.T) { } out := BundleToTerraform(&config) - resource := out.Resource.ModelServing["my_model_serving_endpoint"] + resource := out.Resource.ModelServing["my_model_serving_endpoint"].(*schema.ResourceModelServing) + assert.Equal(t, "name", resource.Name) assert.Equal(t, "model_name", resource.Config.ServedModels[0].ModelName) assert.Equal(t, "1", resource.Config.ServedModels[0].ModelVersion) @@ -366,10 +447,33 @@ func TestBundleToTerraformModelServing(t *testing.T) { assert.Equal(t, "model_name-1", resource.Config.TrafficConfig.Routes[0].ServedModelName) assert.Equal(t, 100, resource.Config.TrafficConfig.Routes[0].TrafficPercentage) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformModelServingPermissions(t *testing.T) { var src = resources.ModelServingEndpoint{ + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "name", + + // Need to specify this to satisfy the equivalence test: + // The previous method of generation includes the "create" field + // because it is required (not marked as `omitempty`). + // The previous method used [json.Marshal] from the standard library + // and as such observed the `omitempty` tag. + // The new method leverages [dyn.Value] where any field that is not + // explicitly set is not part of the value. 
+ Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + }, + }, Permissions: []resources.Permission{ { Level: "CAN_VIEW", @@ -387,13 +491,14 @@ func TestBundleToTerraformModelServingPermissions(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].ServingEndpointId) - assert.Len(t, out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl, 1) + resource := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].(*schema.ResourcePermissions) - p := out.Resource.Permissions["model_serving_my_model_serving_endpoint"].AccessControl[0] - assert.Equal(t, "jane@doe.com", p.UserName) - assert.Equal(t, "CAN_VIEW", p.PermissionLevel) + assert.NotEmpty(t, resource.ServingEndpointId) + assert.Len(t, resource.AccessControl, 1) + assert.Equal(t, "jane@doe.com", resource.AccessControl[0].UserName) + assert.Equal(t, "CAN_VIEW", resource.AccessControl[0].PermissionLevel) + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModel(t *testing.T) { @@ -415,16 +520,24 @@ func TestBundleToTerraformRegisteredModel(t *testing.T) { } out := BundleToTerraform(&config) - resource := out.Resource.RegisteredModel["my_registered_model"] + resource := out.Resource.RegisteredModel["my_registered_model"].(*schema.ResourceRegisteredModel) + assert.Equal(t, "name", resource.Name) assert.Equal(t, "catalog", resource.CatalogName) assert.Equal(t, "schema", resource.SchemaName) assert.Equal(t, "comment", resource.Comment) assert.Nil(t, out.Data) + + bundleToTerraformEquivalenceTest(t, &config) } func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { var src = resources.RegisteredModel{ + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: "name", + CatalogName: "catalog", + SchemaName: "schema", + }, Grants: []resources.Grant{ { Privileges: []string{"EXECUTE"}, @@ -442,58 +555,104 @@ func TestBundleToTerraformRegisteredModelGrants(t *testing.T) { } out := BundleToTerraform(&config) - assert.NotEmpty(t, out.Resource.Grants["registered_model_my_registered_model"].Function) - assert.Len(t, out.Resource.Grants["registered_model_my_registered_model"].Grant, 1) + resource := out.Resource.Grants["registered_model_my_registered_model"].(*schema.ResourceGrants) - p := out.Resource.Grants["registered_model_my_registered_model"].Grant[0] - assert.Equal(t, "jane@doe.com", p.Principal) - assert.Equal(t, "EXECUTE", p.Privileges[0]) + assert.NotEmpty(t, resource.Function) + assert.Len(t, resource.Grant, 1) + assert.Equal(t, "jane@doe.com", resource.Grant[0].Principal) + assert.Equal(t, "EXECUTE", resource.Grant[0].Privileges[0]) + + bundleToTerraformEquivalenceTest(t, &config) +} + +func TestBundleToTerraformDeletedResources(t *testing.T) { + var job1 = resources.Job{ + JobSettings: &jobs.JobSettings{}, + } + var job2 = resources.Job{ + ModifiedStatus: resources.ModifiedStatusDeleted, + JobSettings: &jobs.JobSettings{}, + } + var config = config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job1": &job1, + "my_job2": &job2, + }, + }, + } + + vin, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + out, err := BundleToTerraformWithDynValue(context.Background(), vin) + require.NoError(t, err) + + _, ok := out.Resource.Job["my_job1"] + assert.True(t, ok) + _, ok 
= out.Resource.Job["my_job2"] + assert.False(t, ok) } func TestTerraformToBundleEmptyLocalResources(t *testing.T) { var config = config.Root{ Resources: config.Resources{}, } - var tfState = tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, + var tfState = resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: "test_mlflow_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, }, }, }, @@ -519,6 +678,9 @@ func TestTerraformToBundleEmptyLocalResources(t *testing.T) { assert.Equal(t, "1", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) + assert.Equal(t, "1", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -567,10 +729,17 @@ func TestTerraformToBundleEmptyRemoteResources(t *testing.T) { }, }, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "test_monitor": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor", + }, + }, + }, }, } - var tfState = tfjson.State{ - Values: nil, + var tfState = resourcesState{ + Resources: nil, } err := TerraformToBundle(&tfState, &config) assert.NoError(t, err) @@ -593,6 +762,9 @@ func TestTerraformToBundleEmptyRemoteResources(t 
*testing.T) { assert.Equal(t, "", config.Resources.RegisteredModels["test_registered_model"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.RegisteredModels["test_registered_model"].ModifiedStatus) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + AssertFullResourceCoverage(t, &config) } @@ -671,84 +843,132 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { }, }, }, + QualityMonitors: map[string]*resources.QualityMonitor{ + "test_monitor": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor", + }, + }, + "test_monitor_new": { + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_monitor_new", + }, + }, + }, }, } - var tfState = tfjson.State{ - Values: &tfjson.StateValues{ - RootModule: &tfjson.StateModule{ - Resources: []*tfjson.StateResource{ - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_job", - Mode: "managed", - Name: "test_job_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_pipeline", - Mode: "managed", - Name: "test_pipeline_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_model", - Mode: "managed", - Name: "test_mlflow_model_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_mlflow_experiment", - Mode: "managed", - Name: "test_mlflow_experiment_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_model_serving", - Mode: "managed", - Name: "test_model_serving_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model", - AttributeValues: map[string]interface{}{"id": "1"}, - }, - { - Type: "databricks_registered_model", - Mode: "managed", - Name: "test_registered_model_old", - AttributeValues: map[string]interface{}{"id": "2"}, - }, + var tfState = resourcesState{ + Resources: []stateResource{ + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_job", + Mode: "managed", + Name: "test_job_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_pipeline", + Mode: "managed", + Name: "test_pipeline_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: 
"test_mlflow_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_model", + Mode: "managed", + Name: "test_mlflow_model_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_mlflow_experiment", + Mode: "managed", + Name: "test_mlflow_experiment_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_model_serving", + Mode: "managed", + Name: "test_model_serving_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "1"}}, + }, + }, + { + Type: "databricks_registered_model", + Mode: "managed", + Name: "test_registered_model_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "2"}}, + }, + }, + { + Type: "databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "test_monitor"}}, + }, + }, + { + Type: "databricks_quality_monitor", + Mode: "managed", + Name: "test_monitor_old", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "test_monitor_old"}}, }, }, }, @@ -798,6 +1018,12 @@ func TestTerraformToBundleModifiedResources(t *testing.T) { assert.Equal(t, "", config.Resources.ModelServingEndpoints["test_model_serving_new"].ID) assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.ModelServingEndpoints["test_model_serving_new"].ModifiedStatus) + assert.Equal(t, "test_monitor", config.Resources.QualityMonitors["test_monitor"].ID) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor"].ModifiedStatus) + assert.Equal(t, "test_monitor_old", config.Resources.QualityMonitors["test_monitor_old"].ID) + assert.Equal(t, resources.ModifiedStatusDeleted, config.Resources.QualityMonitors["test_monitor_old"].ModifiedStatus) + assert.Equal(t, "", config.Resources.QualityMonitors["test_monitor_new"].ID) + assert.Equal(t, resources.ModifiedStatusCreated, config.Resources.QualityMonitors["test_monitor_new"].ModifiedStatus) AssertFullResourceCoverage(t, &config) } @@ -815,3 +1041,25 @@ func AssertFullResourceCoverage(t *testing.T, config *config.Root) { } } } + +func assertEqualTerraformRoot(t *testing.T, a, b *schema.Root) { + ba, err := json.Marshal(a) + require.NoError(t, err) + bb, err := json.Marshal(b) + require.NoError(t, err) + assert.JSONEq(t, string(ba), string(bb)) +} + +func bundleToTerraformEquivalenceTest(t *testing.T, config *config.Root) { + t.Run("dyn equivalence", func(t *testing.T) { + tf1 := BundleToTerraform(config) + + vin, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + tf2, err := BundleToTerraformWithDynValue(context.Background(), vin) + require.NoError(t, err) + + // Compare roots + assertEqualTerraformRoot(t, tf1, tf2) + }) +} diff --git 
a/bundle/deploy/terraform/destroy.go b/bundle/deploy/terraform/destroy.go index 0b3baba3b..16f074a22 100644 --- a/bundle/deploy/terraform/destroy.go +++ b/bundle/deploy/terraform/destroy.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/fatih/color" "github.com/hashicorp/terraform-exec/tfexec" tfjson "github.com/hashicorp/terraform-json" @@ -62,7 +63,7 @@ func (w *destroy) Name() string { return "terraform.Destroy" } -func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { +func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { // return early if plan is empty if b.Plan.IsEmpty { cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!") @@ -71,19 +72,19 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } // read plan file plan, err := tf.ShowPlanFile(ctx, b.Plan.Path) if err != nil { - return err + return diag.FromErr(err) } // print the resources that will be destroyed err = logDestroyPlan(ctx, plan.ResourceChanges) if err != nil { - return err + return diag.FromErr(err) } // Ask for confirmation, if needed @@ -91,7 +92,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { red := color.New(color.FgRed).SprintFunc() b.Plan.ConfirmApply, err = cmdio.AskYesOrNo(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed?", red("destroy"))) if err != nil { - return err + return diag.FromErr(err) } } @@ -101,7 +102,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { } if b.Plan.Path == "" { - return fmt.Errorf("no plan found") + return diag.Errorf("no plan found") } cmdio.LogString(ctx, "Starting to destroy resources") @@ -109,7 +110,7 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error { // Apply terraform according to the computed destroy plan err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path)) if err != nil { - return fmt.Errorf("terraform destroy: %w", err) + return diag.Errorf("terraform destroy: %v", err) } cmdio.LogString(ctx, "Successfully destroyed resources!") diff --git a/bundle/deploy/terraform/filer.go b/bundle/deploy/terraform/filer.go deleted file mode 100644 index b1fa5a1bd..000000000 --- a/bundle/deploy/terraform/filer.go +++ /dev/null @@ -1,14 +0,0 @@ -package terraform - -import ( - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/libs/filer" -) - -// filerFunc is a function that returns a filer.Filer. -type filerFunc func(b *bundle.Bundle) (filer.Filer, error) - -// stateFiler returns a filer.Filer that can be used to read/write state files. 
-func stateFiler(b *bundle.Bundle) (filer.Filer, error) { - return filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath) -} diff --git a/bundle/deploy/terraform/import.go b/bundle/deploy/terraform/import.go new file mode 100644 index 000000000..7c1a68158 --- /dev/null +++ b/bundle/deploy/terraform/import.go @@ -0,0 +1,109 @@ +package terraform + +import ( + "bytes" + "context" + "fmt" + "io" + "os" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" + "github.com/hashicorp/terraform-exec/tfexec" +) + +type BindOptions struct { + AutoApprove bool + ResourceType string + ResourceKey string + ResourceId string +} + +type importResource struct { + opts *BindOptions +} + +// Apply implements bundle.Mutator. +func (m *importResource) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + dir, err := Dir(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + tf := b.Terraform + if tf == nil { + return diag.Errorf("terraform not initialized") + } + + err = tf.Init(ctx, tfexec.Upgrade(true)) + if err != nil { + return diag.Errorf("terraform init: %v", err) + } + tmpDir, err := os.MkdirTemp("", "state-*") + if err != nil { + return diag.Errorf("terraform init: %v", err) + } + tmpState := filepath.Join(tmpDir, TerraformStateFileName) + + importAddress := fmt.Sprintf("%s.%s", m.opts.ResourceType, m.opts.ResourceKey) + err = tf.Import(ctx, importAddress, m.opts.ResourceId, tfexec.StateOut(tmpState)) + if err != nil { + return diag.Errorf("terraform import: %v", err) + } + + buf := bytes.NewBuffer(nil) + tf.SetStdout(buf) + + //lint:ignore SA1019 We use legacy -state flag for now to plan the import changes based on temporary state file + changed, err := tf.Plan(ctx, tfexec.State(tmpState), tfexec.Target(importAddress)) + if err != nil { + return diag.Errorf("terraform plan: %v", err) + } + + defer os.RemoveAll(tmpDir) + + if changed && !m.opts.AutoApprove { + output := buf.String() + // Remove output starting from Warning until end of output + output = output[:bytes.Index([]byte(output), []byte("Warning:"))] + cmdio.LogString(ctx, output) + ans, err := cmdio.AskYesOrNo(ctx, "Confirm import changes? Changes will be remotely applied only after running 'bundle deploy'.") + if err != nil { + return diag.FromErr(err) + } + if !ans { + return diag.Errorf("import aborted") + } + } + + // If user confirmed changes, move the state file from temp dir to state location + f, err := os.Create(filepath.Join(dir, TerraformStateFileName)) + if err != nil { + return diag.FromErr(err) + } + defer f.Close() + + tmpF, err := os.Open(tmpState) + if err != nil { + return diag.FromErr(err) + } + defer tmpF.Close() + + _, err = io.Copy(f, tmpF) + if err != nil { + return diag.FromErr(err) + } + + return nil +} + +// Name implements bundle.Mutator. 
+func (*importResource) Name() string { + return "terraform.Import" +} + +func Import(opts *BindOptions) bundle.Mutator { + return &importResource{opts: opts} +} diff --git a/bundle/deploy/terraform/init.go b/bundle/deploy/terraform/init.go index 503a1db24..d480242ce 100644 --- a/bundle/deploy/terraform/init.go +++ b/bundle/deploy/terraform/init.go @@ -2,7 +2,9 @@ package terraform import ( "context" + "errors" "fmt" + "io/fs" "os" "os/exec" "path/filepath" @@ -12,9 +14,10 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/log" - "github.com/hashicorp/go-version" "github.com/hashicorp/hc-install/product" "github.com/hashicorp/hc-install/releases" "github.com/hashicorp/terraform-exec/tfexec" @@ -39,6 +42,17 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con return tf.ExecPath, nil } + // Load exec path from the environment if it matches the currently used version. + envExecPath, err := getEnvVarWithMatchingVersion(ctx, TerraformExecPathEnv, TerraformVersionEnv, TerraformVersion.String()) + if err != nil { + return "", err + } + if envExecPath != "" { + tf.ExecPath = envExecPath + log.Debugf(ctx, "Using Terraform from %s at %s", TerraformExecPathEnv, tf.ExecPath) + return tf.ExecPath, nil + } + binDir, err := b.CacheDir(context.Background(), "bin") if err != nil { return "", err @@ -47,7 +61,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // If the execPath already exists, return it. execPath := filepath.Join(binDir, product.Terraform.BinaryName()) _, err = os.Stat(execPath) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } if err == nil { @@ -59,7 +73,7 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con // Download Terraform to private bin directory. installer := &releases.ExactVersion{ Product: product.Terraform, - Version: version.Must(version.NewVersion("1.5.5")), + Version: TerraformVersion, InstallDir: binDir, Timeout: 1 * time.Minute, } @@ -97,14 +111,67 @@ func inheritEnvVars(ctx context.Context, environ map[string]string) error { } // Include $TF_CLI_CONFIG_FILE to override terraform provider in development. - configFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE") + // See: https://developer.hashicorp.com/terraform/cli/config/config-file#explicit-installation-method-configuration + devConfigFile, ok := env.Lookup(ctx, "TF_CLI_CONFIG_FILE") if ok { + environ["TF_CLI_CONFIG_FILE"] = devConfigFile + } + + // Map $DATABRICKS_TF_CLI_CONFIG_FILE to $TF_CLI_CONFIG_FILE + // VSCode extension provides a file with the "provider_installation.filesystem_mirror" configuration. + // We only use it if the provider version matches the currently used version, + // otherwise terraform will fail to download the right version (even with unrestricted internet access). 
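	// As an illustration only (hypothetical path), the file referenced by DATABRICKS_TF_CLI_CONFIG_FILE is a regular Terraform CLI configuration along the lines of:
	//
	//	provider_installation {
	//	  filesystem_mirror {
	//	    path    = "/path/to/local/provider/mirror"
	//	    include = ["registry.terraform.io/databricks/databricks"]
	//	  }
	//	}
	//
	// which makes Terraform resolve the Databricks provider from disk instead of downloading it.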
+ configFile, err := getEnvVarWithMatchingVersion(ctx, TerraformCliConfigPathEnv, TerraformProviderVersionEnv, schema.ProviderVersion) + if err != nil { + return err + } + if configFile != "" { + log.Debugf(ctx, "Using Terraform CLI config from %s at %s", TerraformCliConfigPathEnv, configFile) environ["TF_CLI_CONFIG_FILE"] = configFile } return nil } +// Example: this function will return a value of TF_EXEC_PATH only if the path exists and if TF_VERSION matches the TerraformVersion. +// This function is used for env vars set by the Databricks VSCode extension. The variables are intended to be used by the CLI +// bundled with the Databricks VSCode extension, but users can use different CLI versions in the VSCode terminals, in which case we want to ignore +// the variables if that CLI uses different versions of the dependencies. +func getEnvVarWithMatchingVersion(ctx context.Context, envVarName string, versionVarName string, currentVersion string) (string, error) { + envValue := env.Get(ctx, envVarName) + versionValue := env.Get(ctx, versionVarName) + + // return early if the environment variable is not set + if envValue == "" { + log.Debugf(ctx, "%s is not defined", envVarName) + return "", nil + } + + // If the path does not exist, we return early. + _, err := os.Stat(envValue) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + log.Debugf(ctx, "%s at %s does not exist", envVarName, envValue) + return "", nil + } else { + return "", err + } + } + + // If the version environment variable is not set, we directly return the value of the environment variable. + if versionValue == "" { + return envValue, nil + } + + // When the version environment variable is set, we check if it matches the current version. + // If it does not match, we return an empty string. + if versionValue != currentVersion { + log.Debugf(ctx, "%s as %s does not match the current version %s, ignoring %s", versionVarName, versionValue, currentVersion, envVarName) + return "", nil + } + return envValue, nil +} + // This function sets temp dir location for terraform to use. 
If user does not // specify anything here, we fall back to a `tmp` directory in the bundle's cache // directory @@ -151,7 +218,24 @@ func setProxyEnvVars(ctx context.Context, environ map[string]string, b *bundle.B return nil } -func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { +func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error { + var products []string + + if experimental := b.Config.Experimental; experimental != nil { + if experimental.PyDABs.Enabled { + products = append(products, "databricks-pydabs/0.0.0") + } + } + + userAgentExtra := strings.Join(products, " ") + if userAgentExtra != "" { + environ["DATABRICKS_USER_AGENT_EXTRA"] = userAgentExtra + } + + return nil +} + +func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tfConfig := b.Config.Bundle.Terraform if tfConfig == nil { tfConfig = &config.Terraform{} @@ -160,46 +244,51 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error { execPath, err := m.findExecPath(ctx, b, tfConfig) if err != nil { - return err + return diag.FromErr(err) } workingDir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } tf, err := tfexec.NewTerraform(workingDir, execPath) if err != nil { - return err + return diag.FromErr(err) } environ, err := b.AuthEnv() if err != nil { - return err + return diag.FromErr(err) } err = inheritEnvVars(ctx, environ) if err != nil { - return err + return diag.FromErr(err) } // Set the temporary directory environment variables err = setTempDirEnvVars(ctx, environ, b) if err != nil { - return err + return diag.FromErr(err) } // Set the proxy related environment variables err = setProxyEnvVars(ctx, environ, b) if err != nil { - return err + return diag.FromErr(err) + } + + err = setUserAgentExtraEnvVar(environ, b) + if err != nil { + return diag.FromErr(err) } // Configure environment variables for auth for Terraform to use. 
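	// At this point environ holds the authentication variables from b.AuthEnv() together with the inherited, temp dir, proxy, and user agent variables configured above.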
log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(environ), ", ")) err = tf.SetEnv(environ) if err != nil { - return err + return diag.FromErr(err) } b.Terraform = tf diff --git a/bundle/deploy/terraform/init_test.go b/bundle/deploy/terraform/init_test.go index 4b00e18e4..aa9b2f77f 100644 --- a/bundle/deploy/terraform/init_test.go +++ b/bundle/deploy/terraform/init_test.go @@ -4,12 +4,17 @@ import ( "context" "os" "os/exec" + "path/filepath" "runtime" "strings" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/env" + "github.com/hashicorp/hc-install/product" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -28,8 +33,8 @@ func TestInitEnvironmentVariables(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", Terraform: &config.Terraform{ @@ -45,8 +50,8 @@ func TestInitEnvironmentVariables(t *testing.T) { t.Setenv("DATABRICKS_TOKEN", "foobar") b.WorkspaceClient() - err = bundle.Apply(context.Background(), b, Initialize()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, Initialize()) + require.NoError(t, diags.Error()) } func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { @@ -55,8 +60,8 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -83,8 +88,8 @@ func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -109,8 +114,8 @@ func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -139,8 +144,8 @@ func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -169,8 +174,8 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -197,8 +202,8 @@ func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) { func TestSetProxyEnvVars(t *testing.T) { b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", }, @@ -243,6 +248,27 @@ func TestSetProxyEnvVars(t *testing.T) { assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env)) } +func TestSetUserAgentExtraEnvVar(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Experimental: &config.Experimental{ + PyDABs: config.PyDABs{ + Enabled: true, + }, + }, + }, + } + + env := make(map[string]string, 0) + err := setUserAgentExtraEnvVar(env, b) + + require.NoError(t, err) + assert.Equal(t, map[string]string{ + "DATABRICKS_USER_AGENT_EXTRA": "databricks-pydabs/0.0.0", + }, env) +} + func TestInheritEnvVars(t *testing.T) { env := map[string]string{} @@ 
-269,3 +295,179 @@ func TestSetUserProfileFromInheritEnvVars(t *testing.T) { assert.Contains(t, env, "USERPROFILE") assert.Equal(t, env["USERPROFILE"], "c:\\foo\\c") } + +func TestInheritEnvVarsWithAbsentTFConfigFile(t *testing.T) { + ctx := context.Background() + envMap := map[string]string{} + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", schema.ProviderVersion) + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", "/tmp/config.tfrc") + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.NotContains(t, envMap, "TF_CLI_CONFIG_FILE") +} + +func TestInheritEnvVarsWithWrongTFProviderVersion(t *testing.T) { + ctx := context.Background() + envMap := map[string]string{} + configFile := createTempFile(t, t.TempDir(), "config.tfrc", false) + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", "wrong") + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", configFile) + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.NotContains(t, envMap, "TF_CLI_CONFIG_FILE") +} + +func TestInheritEnvVarsWithCorrectTFCLIConfigFile(t *testing.T) { + ctx := context.Background() + envMap := map[string]string{} + configFile := createTempFile(t, t.TempDir(), "config.tfrc", false) + ctx = env.Set(ctx, "DATABRICKS_TF_PROVIDER_VERSION", schema.ProviderVersion) + ctx = env.Set(ctx, "DATABRICKS_TF_CLI_CONFIG_FILE", configFile) + err := inheritEnvVars(ctx, envMap) + require.NoError(t, err) + require.Contains(t, envMap, "TF_CLI_CONFIG_FILE") + require.Equal(t, configFile, envMap["TF_CLI_CONFIG_FILE"]) +} + +func TestFindExecPathFromEnvironmentWithWrongVersion(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + existingExecPath := createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + // Create a new terraform binary and expose it through env vars + tmpBinPath := createTempFile(t, t.TempDir(), "terraform-bin", true) + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", "1.2.3") + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", tmpBinPath) + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, existingExecPath, b.Config.Bundle.Terraform.ExecPath) +} + +func TestFindExecPathFromEnvironmentWithCorrectVersionAndNoBinary(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + existingExecPath := createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", TerraformVersion.String()) + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", "/tmp/terraform") + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, existingExecPath, b.Config.Bundle.Terraform.ExecPath) +} + +func TestFindExecPathFromEnvironmentWithCorrectVersionAndBinary(t *testing.T) { + ctx := context.Background() + m := &initialize{} + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{}, + }, + }, + } + // Create a 
pre-existing terraform bin to avoid downloading it + cacheDir, _ := b.CacheDir(ctx, "bin") + createTempFile(t, cacheDir, product.Terraform.BinaryName(), true) + // Create a new terraform binary and expose it through env vars + tmpBinPath := createTempFile(t, t.TempDir(), "terraform-bin", true) + ctx = env.Set(ctx, "DATABRICKS_TF_VERSION", TerraformVersion.String()) + ctx = env.Set(ctx, "DATABRICKS_TF_EXEC_PATH", tmpBinPath) + _, err := m.findExecPath(ctx, b, b.Config.Bundle.Terraform) + require.NoError(t, err) + require.Equal(t, tmpBinPath, b.Config.Bundle.Terraform.ExecPath) +} + +func createTempFile(t *testing.T, dest string, name string, executable bool) string { + binPath := filepath.Join(dest, name) + f, err := os.Create(binPath) + require.NoError(t, err) + defer func() { + err = f.Close() + require.NoError(t, err) + }() + if executable { + err = f.Chmod(0777) + require.NoError(t, err) + } + return binPath +} + +func TestGetEnvVarWithMatchingVersion(t *testing.T) { + envVarName := "FOO" + versionVarName := "FOO_VERSION" + + tmp := t.TempDir() + file := testutil.Touch(t, tmp, "bar") + + var tc = []struct { + envValue string + versionValue string + currentVersion string + expected string + }{ + { + envValue: file, + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: file, + }, + { + envValue: "does-not-exist", + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: "", + }, + { + envValue: file, + versionValue: "1.2.3", + currentVersion: "1.2.4", + expected: "", + }, + { + envValue: "", + versionValue: "1.2.3", + currentVersion: "1.2.3", + expected: "", + }, + { + envValue: file, + versionValue: "", + currentVersion: "1.2.3", + expected: file, + }, + } + + for _, c := range tc { + t.Run("", func(t *testing.T) { + t.Setenv(envVarName, c.envValue) + t.Setenv(versionVarName, c.versionValue) + + actual, err := getEnvVarWithMatchingVersion(context.Background(), envVarName, versionVarName, c.currentVersion) + require.NoError(t, err) + assert.Equal(t, c.expected, actual) + }) + } +} diff --git a/bundle/deploy/terraform/interpolate.go b/bundle/deploy/terraform/interpolate.go index 4f00c27eb..608f1c795 100644 --- a/bundle/deploy/terraform/interpolate.go +++ b/bundle/deploy/terraform/interpolate.go @@ -1,44 +1,69 @@ package terraform import ( + "context" "fmt" - "strings" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) -// Rewrite variable references to resources into Terraform compatible format. 
-func interpolateTerraformResourceIdentifiers(path string, lookup map[string]string) (string, error) { - parts := strings.Split(path, interpolation.Delimiter) - if parts[0] == "resources" { - switch parts[1] { - case "pipelines": - path = strings.Join(append([]string{"databricks_pipeline"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "jobs": - path = strings.Join(append([]string{"databricks_job"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "models": - path = strings.Join(append([]string{"databricks_mlflow_model"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "experiments": - path = strings.Join(append([]string{"databricks_mlflow_experiment"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "model_serving_endpoints": - path = strings.Join(append([]string{"databricks_model_serving"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - case "registered_models": - path = strings.Join(append([]string{"databricks_registered_model"}, parts[2:]...), interpolation.Delimiter) - return fmt.Sprintf("${%s}", path), nil - default: - panic("TODO: " + parts[1]) - } - } - - return interpolation.DefaultLookup(path, lookup) +type interpolateMutator struct { } func Interpolate() bundle.Mutator { - return interpolation.Interpolate(interpolateTerraformResourceIdentifiers) + return &interpolateMutator{} +} + +func (m *interpolateMutator) Name() string { + return "terraform.Interpolate" +} + +func (m *interpolateMutator) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + prefix := dyn.MustPathFromString("resources") + + // Resolve variable references in all values. + return dynvar.Resolve(root, func(path dyn.Path) (dyn.Value, error) { + // Expect paths of the form: + // - resources...... + if !path.HasPrefix(prefix) || len(path) < 4 { + return dyn.InvalidValue, dynvar.ErrSkipResolution + } + + // Rewrite the bundle configuration path: + // + // ${resources.pipelines.my_pipeline.id} + // + // into the Terraform-compatible resource identifier: + // + // ${databricks_pipeline.my_pipeline.id} + // + switch path[1] { + case dyn.Key("pipelines"): + path = dyn.NewPath(dyn.Key("databricks_pipeline")).Append(path[2:]...) + case dyn.Key("jobs"): + path = dyn.NewPath(dyn.Key("databricks_job")).Append(path[2:]...) + case dyn.Key("models"): + path = dyn.NewPath(dyn.Key("databricks_mlflow_model")).Append(path[2:]...) + case dyn.Key("experiments"): + path = dyn.NewPath(dyn.Key("databricks_mlflow_experiment")).Append(path[2:]...) + case dyn.Key("model_serving_endpoints"): + path = dyn.NewPath(dyn.Key("databricks_model_serving")).Append(path[2:]...) + case dyn.Key("registered_models"): + path = dyn.NewPath(dyn.Key("databricks_registered_model")).Append(path[2:]...) + case dyn.Key("quality_monitors"): + path = dyn.NewPath(dyn.Key("databricks_quality_monitor")).Append(path[2:]...) + default: + // Trigger "key not found" for unknown resource types. 
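	// The reference path is left as-is and looked up in the configuration; because no such key exists for an unknown resource type, resolution fails with a "reference does not exist" error (see TestInterpolateUnknownResourceType below).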
+ return dyn.GetByPath(root, path) + } + + return dyn.V(fmt.Sprintf("${%s}", path.String())), nil + }) + }) + + return diag.FromErr(err) } diff --git a/bundle/deploy/terraform/interpolate_test.go b/bundle/deploy/terraform/interpolate_test.go new file mode 100644 index 000000000..9af4a1443 --- /dev/null +++ b/bundle/deploy/terraform/interpolate_test.go @@ -0,0 +1,92 @@ +package terraform + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestInterpolate(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "example", + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "other_pipeline": "${resources.pipelines.other_pipeline.id}", + "other_job": "${resources.jobs.other_job.id}", + "other_model": "${resources.models.other_model.id}", + "other_experiment": "${resources.experiments.other_experiment.id}", + "other_model_serving": "${resources.model_serving_endpoints.other_model_serving.id}", + "other_registered_model": "${resources.registered_models.other_registered_model.id}", + }, + Tasks: []jobs.Task{ + { + TaskKey: "my_task", + NotebookTask: &jobs.NotebookTask{ + BaseParameters: map[string]string{ + "model_name": "${resources.models.my_model.name}", + }, + }, + }, + }, + }, + }, + }, + Models: map[string]*resources.MlflowModel{ + "my_model": { + Model: &ml.Model{ + Name: "my_model", + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, Interpolate()) + require.NoError(t, diags.Error()) + + j := b.Config.Resources.Jobs["my_job"] + assert.Equal(t, "${databricks_pipeline.other_pipeline.id}", j.Tags["other_pipeline"]) + assert.Equal(t, "${databricks_job.other_job.id}", j.Tags["other_job"]) + assert.Equal(t, "${databricks_mlflow_model.other_model.id}", j.Tags["other_model"]) + assert.Equal(t, "${databricks_mlflow_experiment.other_experiment.id}", j.Tags["other_experiment"]) + assert.Equal(t, "${databricks_model_serving.other_model_serving.id}", j.Tags["other_model_serving"]) + assert.Equal(t, "${databricks_registered_model.other_registered_model.id}", j.Tags["other_registered_model"]) + + m := b.Config.Resources.Models["my_model"] + assert.Equal(t, "my_model", m.Model.Name) +} + +func TestInterpolateUnknownResourceType(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "my_job": { + JobSettings: &jobs.JobSettings{ + Tags: map[string]string{ + "other_unknown": "${resources.unknown.other_unknown.id}", + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, Interpolate()) + assert.ErrorContains(t, diags.Error(), `reference does not exist: ${resources.unknown.other_unknown.id}`) +} diff --git a/bundle/deploy/terraform/load.go b/bundle/deploy/terraform/load.go index 624bf7a50..3fb76855e 100644 --- a/bundle/deploy/terraform/load.go +++ b/bundle/deploy/terraform/load.go @@ -6,8 +6,8 @@ import ( "slices" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/hashicorp/terraform-exec/tfexec" - tfjson "github.com/hashicorp/terraform-json" ) type loadMode int @@ -22,46 +22,43 @@ func (l *load) 
Name() string { return "terraform.Load" } -func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error { +func (l *load) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } - state, err := b.Terraform.Show(ctx) + state, err := ParseResourcesState(ctx, b) if err != nil { - return err + return diag.FromErr(err) } err = l.validateState(state) if err != nil { - return err + return diag.FromErr(err) } // Merge state into configuration. err = TerraformToBundle(state, &b.Config) if err != nil { - return err + return diag.FromErr(err) } return nil } -func (l *load) validateState(state *tfjson.State) error { - if state.Values == nil { - if slices.Contains(l.modes, ErrorOnEmptyState) { - return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") - } - return nil +func (l *load) validateState(state *resourcesState) error { + if state.Version != SupportedStateVersion { + return fmt.Errorf("unsupported deployment state version: %d. Try re-deploying the bundle", state.Version) } - if state.Values.RootModule == nil { - return fmt.Errorf("malformed terraform state: RootModule not set") + if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) { + return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?") } return nil diff --git a/bundle/deploy/terraform/load_test.go b/bundle/deploy/terraform/load_test.go index aeaffa14e..c62217187 100644 --- a/bundle/deploy/terraform/load_test.go +++ b/bundle/deploy/terraform/load_test.go @@ -17,8 +17,8 @@ func TestLoadWithNoState(t *testing.T) { } b := &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ - Path: t.TempDir(), Bundle: config.Bundle{ Target: "whatever", Terraform: &config.Terraform{ @@ -32,10 +32,10 @@ func TestLoadWithNoState(t *testing.T) { t.Setenv("DATABRICKS_TOKEN", "foobar") b.WorkspaceClient() - err = bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( Initialize(), Load(ErrorOnEmptyState), )) - require.ErrorContains(t, err, "Did you forget to run 'databricks bundle deploy'") + require.ErrorContains(t, diags.Error(), "Did you forget to run 'databricks bundle deploy'") } diff --git a/bundle/deploy/terraform/pkg.go b/bundle/deploy/terraform/pkg.go index 5e3807be7..cad754024 100644 --- a/bundle/deploy/terraform/pkg.go +++ b/bundle/deploy/terraform/pkg.go @@ -1,3 +1,56 @@ package terraform +import ( + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/hashicorp/go-version" +) + const TerraformStateFileName = "terraform.tfstate" +const TerraformConfigFileName = "bundle.tf.json" + +// Users can provide their own terraform binary and databricks terraform provider by setting the following environment variables. +// This allows users to use the CLI in air-gapped environments. See the `debug terraform` command. +const TerraformExecPathEnv = "DATABRICKS_TF_EXEC_PATH" +const TerraformVersionEnv = "DATABRICKS_TF_VERSION" +const TerraformCliConfigPathEnv = "DATABRICKS_TF_CLI_CONFIG_FILE" +const TerraformProviderVersionEnv = "DATABRICKS_TF_PROVIDER_VERSION" + +// Terraform CLI version to use and the corresponding checksums for it.
The +// checksums are used to verify the integrity of the downloaded binary. Please +// update the checksums when the Terraform version is updated. The checksums +// were obtained from https://releases.hashicorp.com/terraform/1.5.5. +// +// These hashes are not used inside the CLI. They are only co-located here to be +// output in the "databricks bundle debug terraform" output. Downstream applications +// like the CLI docker image use these checksums to verify the integrity of the +// downloaded Terraform archive. +var TerraformVersion = version.Must(version.NewVersion("1.5.5")) + +const checksumLinuxArm64 = "b055aefe343d0b710d8a7afd31aeb702b37bbf4493bb9385a709991e48dfbcd2" +const checksumLinuxAmd64 = "ad0c696c870c8525357b5127680cd79c0bdf58179af9acd091d43b1d6482da4a" + +type Checksum struct { + LinuxArm64 string `json:"linux_arm64"` + LinuxAmd64 string `json:"linux_amd64"` +} + +type TerraformMetadata struct { + Version string `json:"version"` + Checksum Checksum `json:"checksum"` + ProviderHost string `json:"providerHost"` + ProviderSource string `json:"providerSource"` + ProviderVersion string `json:"providerVersion"` +} + +func NewTerraformMetadata() *TerraformMetadata { + return &TerraformMetadata{ + Version: TerraformVersion.String(), + Checksum: Checksum{ + LinuxArm64: checksumLinuxArm64, + LinuxAmd64: checksumLinuxAmd64, + }, + ProviderHost: schema.ProviderHost, + ProviderSource: schema.ProviderSource, + ProviderVersion: schema.ProviderVersion, + } +} diff --git a/bundle/deploy/terraform/pkg_test.go b/bundle/deploy/terraform/pkg_test.go new file mode 100644 index 000000000..b8dcb9e08 --- /dev/null +++ b/bundle/deploy/terraform/pkg_test.go @@ -0,0 +1,51 @@ +package terraform + +import ( + "crypto/sha256" + "encoding/hex" + "fmt" + "io" + "net/http" + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func downloadAndChecksum(t *testing.T, url string, expectedChecksum string) { + resp, err := http.Get(url) + require.NoError(t, err) + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + t.Fatalf("failed to download %s: %s", url, resp.Status) + } + + tmpDir := t.TempDir() + tmpFile, err := os.Create(filepath.Join(tmpDir, "archive.zip")) + require.NoError(t, err) + defer tmpFile.Close() + + _, err = io.Copy(tmpFile, resp.Body) + require.NoError(t, err) + + _, err = tmpFile.Seek(0, 0) // go back to the start of the file + require.NoError(t, err) + + hash := sha256.New() + _, err = io.Copy(hash, tmpFile) + require.NoError(t, err) + + checksum := hex.EncodeToString(hash.Sum(nil)) + assert.Equal(t, expectedChecksum, checksum) +} + +func TestTerraformArchiveChecksums(t *testing.T) { + armUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_arm64.zip", TerraformVersion, TerraformVersion) + amdUrl := fmt.Sprintf("https://releases.hashicorp.com/terraform/%s/terraform_%s_linux_amd64.zip", TerraformVersion, TerraformVersion) + + downloadAndChecksum(t, amdUrl, checksumLinuxAmd64) + downloadAndChecksum(t, armUrl, checksumLinuxArm64) +} diff --git a/bundle/deploy/terraform/plan.go b/bundle/deploy/terraform/plan.go index ff841148c..50e0f78ca 100644 --- a/bundle/deploy/terraform/plan.go +++ b/bundle/deploy/terraform/plan.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/terraform" "github.com/hashicorp/terraform-exec/tfexec" ) @@ -26,30 +27,30 @@ func (p *plan) 
Name() string { return "terraform.Plan" } -func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) error { +func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { tf := b.Terraform if tf == nil { - return fmt.Errorf("terraform not initialized") + return diag.Errorf("terraform not initialized") } cmdio.LogString(ctx, "Starting plan computation") err := tf.Init(ctx, tfexec.Upgrade(true)) if err != nil { - return fmt.Errorf("terraform init: %w", err) + return diag.Errorf("terraform init: %v", err) } // Persist computed plan tfDir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } planPath := filepath.Join(tfDir, "plan") destroy := p.goal == PlanDestroy notEmpty, err := tf.Plan(ctx, tfexec.Destroy(destroy), tfexec.Out(planPath)) if err != nil { - return err + return diag.FromErr(err) } // Set plan in main bundle struct for downstream mutators diff --git a/bundle/deploy/terraform/state_pull.go b/bundle/deploy/terraform/state_pull.go index 14e8ecf12..cc7d34274 100644 --- a/bundle/deploy/terraform/state_pull.go +++ b/bundle/deploy/terraform/state_pull.go @@ -10,12 +10,14 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) type statePull struct { - filerFunc + filerFactory deploy.FilerFactory } func (l *statePull) Name() string { @@ -44,15 +46,15 @@ func (l *statePull) remoteState(ctx context.Context, f filer.Filer) (*bytes.Buff return &buf, nil } -func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := l.filerFunc(b) +func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + f, err := l.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Download state file from filer to local cache directory. @@ -60,7 +62,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { remote, err := l.remoteState(ctx, f) if err != nil { log.Infof(ctx, "Unable to open remote state file: %s", err) - return err + return diag.FromErr(err) } if remote == nil { log.Infof(ctx, "Remote state file does not exist") @@ -70,7 +72,7 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { // Expect the state file to live under dir. 
local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() @@ -87,12 +89,12 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Writing remote state file to local cache directory") _, err = io.Copy(local, bytes.NewReader(remote.Bytes())) if err != nil { - return err + return diag.FromErr(err) } return nil } func StatePull() bundle.Mutator { - return &statePull{stateFiler} + return &statePull{deploy.StateFiler} } diff --git a/bundle/deploy/terraform/state_pull_test.go b/bundle/deploy/terraform/state_pull_test.go index 60eb5d90c..26297bfcb 100644 --- a/bundle/deploy/terraform/state_pull_test.go +++ b/bundle/deploy/terraform/state_pull_test.go @@ -11,34 +11,32 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - mock "github.com/databricks/cli/internal/mocks/libs/filer" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" + "github.com/stretchr/testify/mock" ) func mockStateFilerForPull(t *testing.T, contents map[string]int, merr error) filer.Filer { buf, err := json.Marshal(contents) - require.NoError(t, err) + assert.NoError(t, err) - ctrl := gomock.NewController(t) - mock := mock.NewMockFiler(ctrl) - mock. + f := mockfiler.NewMockFiler(t) + f. EXPECT(). - Read(gomock.Any(), gomock.Eq(TerraformStateFileName)). + Read(mock.Anything, TerraformStateFileName). Return(io.NopCloser(bytes.NewReader(buf)), merr). Times(1) - return mock + return f } func statePullTestBundle(t *testing.T) *bundle.Bundle { return &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ Bundle: config.Bundle{ Target: "default", }, - Path: t.TempDir(), }, } } @@ -50,11 +48,11 @@ func TestStatePullLocalMissingRemoteMissing(t *testing.T) { ctx := context.Background() b := statePullTestBundle(t) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that no local state file has been written. - _, err = os.Stat(localStateFile(t, ctx, b)) + _, err := os.Stat(localStateFile(t, ctx, b)) assert.ErrorIs(t, err, fs.ErrNotExist) } @@ -65,8 +63,8 @@ func TestStatePullLocalMissingRemotePresent(t *testing.T) { ctx := context.Background() b := statePullTestBundle(t) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has been updated. localState := readLocalState(t, ctx, b) @@ -83,8 +81,8 @@ func TestStatePullLocalStale(t *testing.T) { // Write a stale local state file. writeLocalState(t, ctx, b, map[string]int{"serial": 4}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has been updated. localState := readLocalState(t, ctx, b) @@ -101,8 +99,8 @@ func TestStatePullLocalEqual(t *testing.T) { // Write a local state file with the same serial as the remote. writeLocalState(t, ctx, b, map[string]int{"serial": 5}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has not been updated. 
localState := readLocalState(t, ctx, b) @@ -119,8 +117,8 @@ func TestStatePullLocalNewer(t *testing.T) { // Write a local state file with a newer serial as the remote. writeLocalState(t, ctx, b, map[string]int{"serial": 6}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) // Confirm that the local state file has not been updated. localState := readLocalState(t, ctx, b) diff --git a/bundle/deploy/terraform/state_push.go b/bundle/deploy/terraform/state_push.go index a51403295..b50983bd4 100644 --- a/bundle/deploy/terraform/state_push.go +++ b/bundle/deploy/terraform/state_push.go @@ -6,34 +6,36 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" ) type statePush struct { - filerFunc + filerFactory deploy.FilerFactory } func (l *statePush) Name() string { return "terraform:state-push" } -func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { - f, err := l.filerFunc(b) +func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + f, err := l.filerFactory(b) if err != nil { - return err + return diag.FromErr(err) } dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } // Expect the state file to live under dir. local, err := os.Open(filepath.Join(dir, TerraformStateFileName)) if err != nil { - return err + return diag.FromErr(err) } defer local.Close() @@ -42,12 +44,12 @@ func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error { log.Infof(ctx, "Writing local state file to remote state directory") err = f.Write(ctx, TerraformStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists) if err != nil { - return err + return diag.FromErr(err) } return nil } func StatePush() bundle.Mutator { - return &statePush{stateFiler} + return &statePush{deploy.StateFiler} } diff --git a/bundle/deploy/terraform/state_push_test.go b/bundle/deploy/terraform/state_push_test.go index 4167b3cb9..e054773f3 100644 --- a/bundle/deploy/terraform/state_push_test.go +++ b/bundle/deploy/terraform/state_push_test.go @@ -8,34 +8,32 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - mock "github.com/databricks/cli/internal/mocks/libs/filer" + mockfiler "github.com/databricks/cli/internal/mocks/libs/filer" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/assert" - "go.uber.org/mock/gomock" + "github.com/stretchr/testify/mock" ) func mockStateFilerForPush(t *testing.T, fn func(body io.Reader)) filer.Filer { - ctrl := gomock.NewController(t) - mock := mock.NewMockFiler(ctrl) - mock. + f := mockfiler.NewMockFiler(t) + f. EXPECT(). - Write(gomock.Any(), gomock.Any(), gomock.Any(), filer.CreateParentDirectories, filer.OverwriteIfExists). - Do(func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { + Write(mock.Anything, mock.Anything, mock.Anything, filer.CreateParentDirectories, filer.OverwriteIfExists). + Run(func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) { fn(reader) - return nil }). Return(nil). 
Times(1) - return mock + return f } func statePushTestBundle(t *testing.T) *bundle.Bundle { return &bundle.Bundle{ + RootPath: t.TempDir(), Config: config.Root{ Bundle: config.Bundle{ Target: "default", }, - Path: t.TempDir(), }, } } @@ -58,6 +56,6 @@ func TestStatePush(t *testing.T) { // Write a stale local state file. writeLocalState(t, ctx, b, map[string]int{"serial": 4}) - err := bundle.Apply(ctx, b, m) - assert.NoError(t, err) + diags := bundle.Apply(ctx, b, m) + assert.NoError(t, diags.Error()) } diff --git a/bundle/deploy/terraform/state_test.go b/bundle/deploy/terraform/state_test.go index ee15b953b..ff3250625 100644 --- a/bundle/deploy/terraform/state_test.go +++ b/bundle/deploy/terraform/state_test.go @@ -8,12 +8,13 @@ import ( "testing" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/libs/filer" "github.com/stretchr/testify/require" ) -// identityFiler returns a filerFunc that returns the specified filer. -func identityFiler(f filer.Filer) filerFunc { +// identityFiler returns a FilerFactory that returns the specified filer. +func identityFiler(f filer.Filer) deploy.FilerFactory { return func(_ *bundle.Bundle) (filer.Filer, error) { return f, nil } diff --git a/bundle/deploy/terraform/tfdyn/convert.go b/bundle/deploy/terraform/tfdyn/convert.go new file mode 100644 index 000000000..9df4e2640 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert.go @@ -0,0 +1,23 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +type Converter interface { + Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error +} + +var converters = map[string]Converter{} + +func GetConverter(name string) (Converter, bool) { + c, ok := converters[name] + return c, ok +} + +func registerConverter(name string, c Converter) { + converters[name] = c +} diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment.go b/bundle/deploy/terraform/tfdyn/convert_experiment.go new file mode 100644 index 000000000..0c129181f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_experiment.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertExperimentResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceMlflowExperiment{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "experiment normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type experimentConverter struct{} + +func (experimentConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertExperimentResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.MlflowExperiment[key] = vout.AsAny() + + // Configure permissions for this resource. 
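	// convertPermissionsResource is expected to return nil when the resource defines no permissions, in which case no databricks_permissions entry is emitted for it.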
+ if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.ExperimentId = fmt.Sprintf("${databricks_mlflow_experiment.%s.id}", key) + out.Permissions["mlflow_experiment_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("experiments", experimentConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_experiment_test.go b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go new file mode 100644 index 000000000..63add4368 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_experiment_test.go @@ -0,0 +1,52 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertExperiment(t *testing.T) { + var src = resources.MlflowExperiment{ + Experiment: &ml.Experiment{ + Name: "name", + }, + Permissions: []resources.Permission{ + { + Level: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = experimentConverter{}.Convert(ctx, "my_experiment", vin, out) + require.NoError(t, err) + + // Assert equality on the experiment + assert.Equal(t, map[string]any{ + "name": "name", + }, out.MlflowExperiment["my_experiment"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + ExperimentId: "${databricks_mlflow_experiment.my_experiment.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["mlflow_experiment_my_experiment"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_grants.go b/bundle/deploy/terraform/tfdyn/convert_grants.go new file mode 100644 index 000000000..1ddd99dd7 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_grants.go @@ -0,0 +1,39 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +func convertGrantsResource(ctx context.Context, vin dyn.Value) *schema.ResourceGrants { + grants, ok := vin.Get("grants").AsSequence() + if !ok || len(grants) == 0 { + return nil + } + + resource := &schema.ResourceGrants{} + for _, permission := range grants { + principal, _ := permission.Get("principal").AsString() + v, _ := permission.Get("privileges").AsSequence() + + // Turn privileges into a slice of strings. 
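	// Values that are not strings are skipped rather than failing the conversion.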
+ var privileges []string + for _, privilege := range v { + str, ok := privilege.AsString() + if !ok { + continue + } + + privileges = append(privileges, str) + } + + resource.Grant = append(resource.Grant, schema.ResourceGrantsGrant{ + Principal: principal, + Privileges: privileges, + }) + } + + return resource +} diff --git a/bundle/deploy/terraform/tfdyn/convert_grants_test.go b/bundle/deploy/terraform/tfdyn/convert_grants_test.go new file mode 100644 index 000000000..a486bc36f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_grants_test.go @@ -0,0 +1,71 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertGrants(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: []resources.Grant{ + { + Privileges: []string{"EXECUTE", "FOO"}, + Principal: "jane@doe.com", + }, + { + Privileges: []string{"EXECUTE", "BAR"}, + Principal: "spn", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + require.NotNil(t, resource) + assert.Equal(t, []schema.ResourceGrantsGrant{ + { + Privileges: []string{"EXECUTE", "FOO"}, + Principal: "jane@doe.com", + }, + { + Privileges: []string{"EXECUTE", "BAR"}, + Principal: "spn", + }, + }, resource.Grant) +} + +func TestConvertGrantsNil(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: nil, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + assert.Nil(t, resource) +} + +func TestConvertGrantsEmpty(t *testing.T) { + var src = resources.RegisteredModel{ + Grants: []resources.Grant{}, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertGrantsResource(ctx, vin) + assert.Nil(t, resource) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_job.go b/bundle/deploy/terraform/tfdyn/convert_job.go new file mode 100644 index 000000000..d1e7e73e2 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_job.go @@ -0,0 +1,99 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +func convertJobResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the input value to the underlying job schema. + // This removes superfluous keys and adapts the input to the expected schema. + vin, diags := convert.Normalize(jobs.JobSettings{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary) + } + + // Modify top-level keys. 
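	// The bundle configuration uses plural field names where the Terraform provider expects singular, repeatable block names.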
+ vout, err := renameKeys(vin, map[string]string{ + "tasks": "task", + "job_clusters": "job_cluster", + "parameters": "parameter", + "environments": "environment", + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Modify keys in the "git_source" block + vout, err = dyn.Map(vout, "git_source", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return renameKeys(v, map[string]string{ + "git_branch": "branch", + "git_commit": "commit", + "git_provider": "provider", + "git_tag": "tag", + "git_url": "url", + }) + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Modify keys in the "task" blocks + vout, err = dyn.Map(vout, "task", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + // Modify "library" blocks for for_each_task + vout, err = dyn.Map(v, "for_each_task.task", func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return renameKeys(v, map[string]string{ + "libraries": "library", + }) + }) + + if err != nil { + return dyn.InvalidValue, err + } + + return renameKeys(vout, map[string]string{ + "libraries": "library", + }) + })) + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the output value to the target schema. + vout, diags = convert.Normalize(schema.ResourceJob{}, vout) + for _, diag := range diags { + log.Debugf(ctx, "job normalization diagnostic: %s", diag.Summary) + } + + return vout, err +} + +type jobConverter struct{} + +func (jobConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertJobResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Job[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.JobId = fmt.Sprintf("${databricks_job.%s.id}", key) + out.Permissions["job_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("jobs", jobConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_job_test.go b/bundle/deploy/terraform/tfdyn/convert_job_test.go new file mode 100644 index 000000000..b9e1f967f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_job_test.go @@ -0,0 +1,129 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertJob(t *testing.T) { + var src = resources.Job{ + JobSettings: &jobs.JobSettings{ + Name: "my job", + JobClusters: []jobs.JobCluster{ + { + JobClusterKey: "key", + NewCluster: compute.ClusterSpec{ + SparkVersion: "10.4.x-scala2.12", + }, + }, + }, + GitSource: &jobs.GitSource{ + GitProvider: jobs.GitProviderGitHub, + GitUrl: "https://github.com/foo/bar", + }, + Parameters: []jobs.JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + Tasks: []jobs.Task{ + { + TaskKey: "task_key", + JobClusterKey: "job_cluster_key", + Libraries: []compute.Library{ + { + Pypi: &compute.PythonPyPiLibrary{ + Package: "package", + }, + }, + { + Whl: "/path/to/my.whl", + }, + }, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + 
UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = jobConverter{}.Convert(ctx, "my_job", vin, out) + require.NoError(t, err) + + // Assert equality on the job + assert.Equal(t, map[string]any{ + "name": "my job", + "job_cluster": []any{ + map[string]any{ + "job_cluster_key": "key", + "new_cluster": map[string]any{ + "spark_version": "10.4.x-scala2.12", + }, + }, + }, + "git_source": map[string]any{ + "provider": "gitHub", + "url": "https://github.com/foo/bar", + }, + "parameter": []any{ + map[string]any{ + "name": "param1", + "default": "default1", + }, + map[string]any{ + "name": "param2", + "default": "default2", + }, + }, + "task": []any{ + map[string]any{ + "task_key": "task_key", + "job_cluster_key": "job_cluster_key", + "library": []any{ + map[string]any{ + "pypi": map[string]any{ + "package": "package", + }, + }, + map[string]any{ + "whl": "/path/to/my.whl", + }, + }, + }, + }, + }, out.Job["my_job"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + JobId: "${databricks_job.my_job.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["job_my_job"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model.go b/bundle/deploy/terraform/tfdyn/convert_model.go new file mode 100644 index 000000000..f5d7d489b --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertModelResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceMlflowModel{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "model normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type modelConverter struct{} + +func (modelConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertModelResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.MlflowModel[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.RegisteredModelId = fmt.Sprintf("${databricks_mlflow_model.%s.registered_model_id}", key) + out.Permissions["mlflow_model_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("models", modelConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go new file mode 100644 index 000000000..b67e4dcc3 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertModelServingEndpointResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. 
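	// Normalization drops keys that are not part of the Terraform resource schema; anything dropped is surfaced as a diagnostic and logged at debug level below.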
+ vout, diags := convert.Normalize(schema.ResourceModelServing{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "model serving endpoint normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type modelServingEndpointConverter struct{} + +func (modelServingEndpointConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertModelServingEndpointResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.ModelServing[key] = vout.AsAny() + + // Configure permissions for this resource. + if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.ServingEndpointId = fmt.Sprintf("${databricks_model_serving.%s.serving_endpoint_id}", key) + out.Permissions["model_serving_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("model_serving_endpoints", modelServingEndpointConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go new file mode 100644 index 000000000..63b75e9ab --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_serving_endpoint_test.go @@ -0,0 +1,88 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/serving" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertModelServingEndpoint(t *testing.T) { + var src = resources.ModelServingEndpoint{ + CreateServingEndpoint: &serving.CreateServingEndpoint{ + Name: "name", + Config: serving.EndpointCoreConfigInput{ + ServedModels: []serving.ServedModelInput{ + { + ModelName: "model_name", + ModelVersion: "1", + ScaleToZeroEnabled: true, + WorkloadSize: "Small", + }, + }, + TrafficConfig: &serving.TrafficConfig{ + Routes: []serving.Route{ + { + ServedModelName: "model_name-1", + TrafficPercentage: 100, + }, + }, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = modelServingEndpointConverter{}.Convert(ctx, "my_model_serving_endpoint", vin, out) + require.NoError(t, err) + + // Assert equality on the model serving endpoint + assert.Equal(t, map[string]any{ + "name": "name", + "config": map[string]any{ + "served_models": []any{ + map[string]any{ + "model_name": "model_name", + "model_version": "1", + "scale_to_zero_enabled": true, + "workload_size": "Small", + }, + }, + "traffic_config": map[string]any{ + "routes": []any{ + map[string]any{ + "served_model_name": "model_name-1", + "traffic_percentage": int64(100), + }, + }, + }, + }, + }, out.ModelServing["my_model_serving_endpoint"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + ServingEndpointId: "${databricks_model_serving.my_model_serving_endpoint.serving_endpoint_id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["model_serving_my_model_serving_endpoint"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_model_test.go 
b/bundle/deploy/terraform/tfdyn/convert_model_test.go new file mode 100644 index 000000000..542caa878 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_model_test.go @@ -0,0 +1,74 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertModel(t *testing.T) { + var src = resources.MlflowModel{ + Model: &ml.Model{ + Name: "name", + Description: "description", + Tags: []ml.ModelTag{ + { + Key: "k1", + Value: "v1", + }, + { + Key: "k2", + Value: "v2", + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = modelConverter{}.Convert(ctx, "my_model", vin, out) + require.NoError(t, err) + + // Assert equality on the model + assert.Equal(t, map[string]any{ + "name": "name", + "description": "description", + "tags": []any{ + map[string]any{ + "key": "k1", + "value": "v1", + }, + map[string]any{ + "key": "k2", + "value": "v2", + }, + }, + }, out.MlflowModel["my_model"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + RegisteredModelId: "${databricks_mlflow_model.my_model.registered_model_id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_READ", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["mlflow_model_my_model"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions.go b/bundle/deploy/terraform/tfdyn/convert_permissions.go new file mode 100644 index 000000000..99e8d2973 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_permissions.go @@ -0,0 +1,32 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" +) + +func convertPermissionsResource(ctx context.Context, vin dyn.Value) *schema.ResourcePermissions { + permissions, ok := vin.Get("permissions").AsSequence() + if !ok || len(permissions) == 0 { + return nil + } + + resource := &schema.ResourcePermissions{} + for _, permission := range permissions { + level, _ := permission.Get("level").AsString() + userName, _ := permission.Get("user_name").AsString() + groupName, _ := permission.Get("group_name").AsString() + servicePrincipalName, _ := permission.Get("service_principal_name").AsString() + + resource.AccessControl = append(resource.AccessControl, schema.ResourcePermissionsAccessControl{ + PermissionLevel: level, + UserName: userName, + GroupName: groupName, + ServicePrincipalName: servicePrincipalName, + }) + } + + return resource +} diff --git a/bundle/deploy/terraform/tfdyn/convert_permissions_test.go b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go new file mode 100644 index 000000000..ba389020f --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_permissions_test.go @@ -0,0 +1,85 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + 
"github.com/stretchr/testify/require" +) + +func TestConvertPermissions(t *testing.T) { + var src = resources.Job{ + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + { + Level: "CAN_MANAGE", + GroupName: "special admins", + }, + { + Level: "CAN_RUN", + ServicePrincipalName: "spn", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + require.NotNil(t, resource) + assert.Equal(t, []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + GroupName: "", + ServicePrincipalName: "", + }, + { + PermissionLevel: "CAN_MANAGE", + UserName: "", + GroupName: "special admins", + ServicePrincipalName: "", + }, + { + PermissionLevel: "CAN_RUN", + UserName: "", + GroupName: "", + ServicePrincipalName: "spn", + }, + }, resource.AccessControl) +} + +func TestConvertPermissionsNil(t *testing.T) { + var src = resources.Job{ + Permissions: nil, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + assert.Nil(t, resource) +} + +func TestConvertPermissionsEmpty(t *testing.T) { + var src = resources.Job{ + Permissions: []resources.Permission{}, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + resource := convertPermissionsResource(ctx, vin) + assert.Nil(t, resource) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline.go b/bundle/deploy/terraform/tfdyn/convert_pipeline.go new file mode 100644 index 000000000..ea0c94d66 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline.go @@ -0,0 +1,55 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertPipelineResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Modify top-level keys. + vout, err := renameKeys(vin, map[string]string{ + "libraries": "library", + "clusters": "cluster", + "notifications": "notification", + }) + if err != nil { + return dyn.InvalidValue, err + } + + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourcePipeline{}, vout) + for _, diag := range diags { + log.Debugf(ctx, "pipeline normalization diagnostic: %s", diag.Summary) + } + + return vout, err +} + +type pipelineConverter struct{} + +func (pipelineConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertPipelineResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.Pipeline[key] = vout.AsAny() + + // Configure permissions for this resource. 
+ if permissions := convertPermissionsResource(ctx, vin); permissions != nil { + permissions.PipelineId = fmt.Sprintf("${databricks_pipeline.%s.id}", key) + out.Permissions["pipeline_"+key] = permissions + } + + return nil +} + +func init() { + registerConverter("pipelines", pipelineConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go new file mode 100644 index 000000000..7010d463a --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_pipeline_test.go @@ -0,0 +1,128 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertPipeline(t *testing.T) { + var src = resources.Pipeline{ + PipelineSpec: &pipelines.PipelineSpec{ + Name: "my pipeline", + Libraries: []pipelines.PipelineLibrary{ + { + Notebook: &pipelines.NotebookLibrary{ + Path: "notebook path", + }, + }, + { + File: &pipelines.FileLibrary{ + Path: "file path", + }, + }, + }, + Notifications: []pipelines.Notifications{ + { + Alerts: []string{ + "on-update-fatal-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + }, + }, + { + Alerts: []string{ + "on-update-failure", + "on-flow-failure", + }, + EmailRecipients: []string{ + "jane@doe.com", + "john@doe.com", + }, + }, + }, + Clusters: []pipelines.PipelineCluster{ + { + Label: "default", + NumWorkers: 1, + }, + }, + }, + Permissions: []resources.Permission{ + { + Level: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = pipelineConverter{}.Convert(ctx, "my_pipeline", vin, out) + require.NoError(t, err) + + // Assert equality on the pipeline + assert.Equal(t, map[string]any{ + "name": "my pipeline", + "library": []any{ + map[string]any{ + "notebook": map[string]any{ + "path": "notebook path", + }, + }, + map[string]any{ + "file": map[string]any{ + "path": "file path", + }, + }, + }, + "notification": []any{ + map[string]any{ + "alerts": []any{ + "on-update-fatal-failure", + }, + "email_recipients": []any{ + "jane@doe.com", + }, + }, + map[string]any{ + "alerts": []any{ + "on-update-failure", + "on-flow-failure", + }, + "email_recipients": []any{ + "jane@doe.com", + "john@doe.com", + }, + }, + }, + "cluster": []any{ + map[string]any{ + "label": "default", + "num_workers": int64(1), + }, + }, + }, out.Pipeline["my_pipeline"]) + + // Assert equality on the permissions + assert.Equal(t, &schema.ResourcePermissions{ + PipelineId: "${databricks_pipeline.my_pipeline.id}", + AccessControl: []schema.ResourcePermissionsAccessControl{ + { + PermissionLevel: "CAN_VIEW", + UserName: "jane@doe.com", + }, + }, + }, out.Permissions["pipeline_my_pipeline"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go new file mode 100644 index 000000000..341df7c22 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor.go @@ -0,0 +1,37 @@ +package tfdyn + +import ( + "context" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + 
"github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertQualityMonitorResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. + vout, diags := convert.Normalize(schema.ResourceQualityMonitor{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "monitor normalization diagnostic: %s", diag.Summary) + } + return vout, nil +} + +type qualityMonitorConverter struct{} + +func (qualityMonitorConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertQualityMonitorResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.QualityMonitor[key] = vout.AsAny() + + return nil +} + +func init() { + registerConverter("quality_monitors", qualityMonitorConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go new file mode 100644 index 000000000..50bfce7a0 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_quality_monitor_test.go @@ -0,0 +1,46 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertQualityMonitor(t *testing.T) { + var src = resources.QualityMonitor{ + CreateMonitor: &catalog.CreateMonitor{ + TableName: "test_table_name", + AssetsDir: "assets_dir", + OutputSchemaName: "output_schema_name", + InferenceLog: &catalog.MonitorInferenceLog{ + ModelIdCol: "model_id", + PredictionCol: "test_prediction_col", + ProblemType: "PROBLEM_TYPE_CLASSIFICATION", + }, + }, + } + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + ctx := context.Background() + out := schema.NewResources() + err = qualityMonitorConverter{}.Convert(ctx, "my_monitor", vin, out) + + require.NoError(t, err) + assert.Equal(t, map[string]any{ + "assets_dir": "assets_dir", + "output_schema_name": "output_schema_name", + "table_name": "test_table_name", + "inference_log": map[string]any{ + "model_id_col": "model_id", + "prediction_col": "test_prediction_col", + "problem_type": "PROBLEM_TYPE_CLASSIFICATION", + }, + }, out.QualityMonitor["my_monitor"]) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model.go b/bundle/deploy/terraform/tfdyn/convert_registered_model.go new file mode 100644 index 000000000..20aa596f2 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model.go @@ -0,0 +1,45 @@ +package tfdyn + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/cli/libs/log" +) + +func convertRegisteredModelResource(ctx context.Context, vin dyn.Value) (dyn.Value, error) { + // Normalize the output value to the target schema. 
+ vout, diags := convert.Normalize(schema.ResourceRegisteredModel{}, vin) + for _, diag := range diags { + log.Debugf(ctx, "registered model normalization diagnostic: %s", diag.Summary) + } + + return vout, nil +} + +type registeredModelConverter struct{} + +func (registeredModelConverter) Convert(ctx context.Context, key string, vin dyn.Value, out *schema.Resources) error { + vout, err := convertRegisteredModelResource(ctx, vin) + if err != nil { + return err + } + + // Add the converted resource to the output. + out.RegisteredModel[key] = vout.AsAny() + + // Configure grants for this resource. + if grants := convertGrantsResource(ctx, vin); grants != nil { + grants.Function = fmt.Sprintf("${databricks_registered_model.%s.id}", key) + out.Grants["registered_model_"+key] = grants + } + + return nil +} + +func init() { + registerConverter("registered_models", registeredModelConverter{}) +} diff --git a/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go new file mode 100644 index 000000000..77096e8d0 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/convert_registered_model_test.go @@ -0,0 +1,58 @@ +package tfdyn + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestConvertRegisteredModel(t *testing.T) { + var src = resources.RegisteredModel{ + CreateRegisteredModelRequest: &catalog.CreateRegisteredModelRequest{ + Name: "name", + CatalogName: "catalog", + SchemaName: "schema", + Comment: "comment", + }, + Grants: []resources.Grant{ + { + Privileges: []string{"EXECUTE"}, + Principal: "jane@doe.com", + }, + }, + } + + vin, err := convert.FromTyped(src, dyn.NilValue) + require.NoError(t, err) + + ctx := context.Background() + out := schema.NewResources() + err = registeredModelConverter{}.Convert(ctx, "my_registered_model", vin, out) + require.NoError(t, err) + + // Assert equality on the registered model + assert.Equal(t, map[string]any{ + "name": "name", + "catalog_name": "catalog", + "schema_name": "schema", + "comment": "comment", + }, out.RegisteredModel["my_registered_model"]) + + // Assert equality on the grants + assert.Equal(t, &schema.ResourceGrants{ + Function: "${databricks_registered_model.my_registered_model.id}", + Grant: []schema.ResourceGrantsGrant{ + { + Privileges: []string{"EXECUTE"}, + Principal: "jane@doe.com", + }, + }, + }, out.Grants["registered_model_my_registered_model"]) +} diff --git a/bundle/deploy/terraform/tfdyn/rename_keys.go b/bundle/deploy/terraform/tfdyn/rename_keys.go new file mode 100644 index 000000000..650ffb890 --- /dev/null +++ b/bundle/deploy/terraform/tfdyn/rename_keys.go @@ -0,0 +1,46 @@ +package tfdyn + +import ( + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/merge" +) + +// renameKeys renames keys in the given map value. +// +// Terraform resources sometimes use singular names for repeating blocks where the API +// definition uses the plural name. This function can convert between the two. 
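For context, a minimal usage sketch of the renameKeys helper defined below. The example function name and the input value are hypothetical; renameKeys and the dyn package come from this diff.

```go
// Illustrative only: map the API-style plural key "libraries" onto the
// singular "library" block name that the Terraform provider expects.
// exampleRenameLibraries is a hypothetical caller, not part of this change.
func exampleRenameLibraries() (dyn.Value, error) {
	// API shape: {"libraries": [{"whl": "/path/to/my.whl"}]}
	in := dyn.V(map[string]dyn.Value{
		"libraries": dyn.V([]dyn.Value{
			dyn.V(map[string]dyn.Value{
				"whl": dyn.V("/path/to/my.whl"),
			}),
		}),
	})

	// Expected result shape: {"library": [{"whl": "/path/to/my.whl"}]}
	return renameKeys(in, map[string]string{"libraries": "library"})
}
```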
+func renameKeys(v dyn.Value, rename map[string]string) (dyn.Value, error) { + var err error + var acc = dyn.V(map[string]dyn.Value{}) + + nv, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + if len(p) == 0 { + return v, nil + } + + // Check if this key should be renamed. + for oldKey, newKey := range rename { + if p[0].Key() != oldKey { + continue + } + + // Add the new key to the accumulator. + p[0] = dyn.Key(newKey) + acc, err = dyn.SetByPath(acc, p, v) + if err != nil { + return dyn.InvalidValue, err + } + return dyn.InvalidValue, dyn.ErrDrop + } + + // Pass through all other values. + return v, dyn.ErrSkip + }) + + if err != nil { + return dyn.InvalidValue, err + } + + // Merge the accumulator with the original value. + return merge.Merge(nv, acc) +} diff --git a/bundle/deploy/terraform/unbind.go b/bundle/deploy/terraform/unbind.go new file mode 100644 index 000000000..49d65615e --- /dev/null +++ b/bundle/deploy/terraform/unbind.go @@ -0,0 +1,42 @@ +package terraform + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/hashicorp/terraform-exec/tfexec" +) + +type unbind struct { + resourceType string + resourceKey string +} + +func (m *unbind) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + tf := b.Terraform + if tf == nil { + return diag.Errorf("terraform not initialized") + } + + err := tf.Init(ctx, tfexec.Upgrade(true)) + if err != nil { + return diag.Errorf("terraform init: %v", err) + } + + err = tf.StateRm(ctx, fmt.Sprintf("%s.%s", m.resourceType, m.resourceKey)) + if err != nil { + return diag.Errorf("terraform state rm: %v", err) + } + + return nil +} + +func (*unbind) Name() string { + return "terraform.Unbind" +} + +func Unbind(resourceType string, resourceKey string) bundle.Mutator { + return &unbind{resourceType: resourceType, resourceKey: resourceKey} +} diff --git a/bundle/deploy/terraform/util.go b/bundle/deploy/terraform/util.go index a5978b397..1a8a83ac7 100644 --- a/bundle/deploy/terraform/util.go +++ b/bundle/deploy/terraform/util.go @@ -1,14 +1,46 @@ package terraform import ( + "context" "encoding/json" + "errors" "io" + "os" + "path/filepath" + + "github.com/databricks/cli/bundle" + tfjson "github.com/hashicorp/terraform-json" ) -type state struct { +// Partial representation of the Terraform state file format. +// We are only interested global version and serial numbers, +// plus resource types, names, modes, and ids. 
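For context, a minimal sketch of how the partial state types declared below might be queried for a resource's Terraform ID. The helper name findResourceID is an assumption made for this example; the field names come from the structs in this diff.

```go
// Illustrative only: look up the ID of a managed resource by type and name
// in a parsed *resourcesState. Hypothetical helper, not part of this change.
func findResourceID(state *resourcesState, resourceType, resourceName string) (string, bool) {
	for _, r := range state.Resources {
		if r.Type != resourceType || r.Name != resourceName {
			continue
		}
		for _, instance := range r.Instances {
			if instance.Attributes.ID != "" {
				return instance.Attributes.ID, true
			}
		}
	}
	return "", false
}
```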
+type resourcesState struct { + Version int `json:"version"` + Resources []stateResource `json:"resources"` +} + +const SupportedStateVersion = 4 + +type serialState struct { Serial int `json:"serial"` } +type stateResource struct { + Type string `json:"type"` + Name string `json:"name"` + Mode tfjson.ResourceMode `json:"mode"` + Instances []stateResourceInstance `json:"instances"` +} + +type stateResourceInstance struct { + Attributes stateInstanceAttributes `json:"attributes"` +} + +type stateInstanceAttributes struct { + ID string `json:"id"` +} + func IsLocalStateStale(local io.Reader, remote io.Reader) bool { localState, err := loadState(local) if err != nil { @@ -23,12 +55,12 @@ func IsLocalStateStale(local io.Reader, remote io.Reader) bool { return localState.Serial < remoteState.Serial } -func loadState(input io.Reader) (*state, error) { +func loadState(input io.Reader) (*serialState, error) { content, err := io.ReadAll(input) if err != nil { return nil, err } - var s state + var s serialState err = json.Unmarshal(content, &s) if err != nil { return nil, err @@ -36,3 +68,20 @@ func loadState(input io.Reader) (*state, error) { return &s, nil } + +func ParseResourcesState(ctx context.Context, b *bundle.Bundle) (*resourcesState, error) { + cacheDir, err := Dir(ctx, b) + if err != nil { + return nil, err + } + rawState, err := os.ReadFile(filepath.Join(cacheDir, TerraformStateFileName)) + if err != nil { + if errors.Is(err, os.ErrNotExist) { + return &resourcesState{Version: SupportedStateVersion}, nil + } + return nil, err + } + var state resourcesState + err = json.Unmarshal(rawState, &state) + return &state, err +} diff --git a/bundle/deploy/terraform/util_test.go b/bundle/deploy/terraform/util_test.go index 4f2cf2918..8949ebca8 100644 --- a/bundle/deploy/terraform/util_test.go +++ b/bundle/deploy/terraform/util_test.go @@ -1,11 +1,16 @@ package terraform import ( + "context" "fmt" + "os" + "path/filepath" "strings" "testing" "testing/iotest" + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" "github.com/stretchr/testify/assert" ) @@ -38,3 +43,97 @@ func TestLocalStateMarkNonStaleWhenRemoteFailsToLoad(t *testing.T) { remote := iotest.ErrReader(fmt.Errorf("Random error")) assert.False(t, IsLocalStateStale(local, remote)) } + +func TestParseResourcesStateWithNoFile(t *testing.T) { + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{ + ExecPath: "terraform", + }, + }, + }, + } + state, err := ParseResourcesState(context.Background(), b) + assert.NoError(t, err) + assert.Equal(t, &resourcesState{Version: SupportedStateVersion}, state) +} + +func TestParseResourcesStateWithExistingStateFile(t *testing.T) { + ctx := context.Background() + b := &bundle.Bundle{ + RootPath: t.TempDir(), + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + Terraform: &config.Terraform{ + ExecPath: "terraform", + }, + }, + }, + } + cacheDir, err := Dir(ctx, b) + assert.NoError(t, err) + data := []byte(`{ + "version": 4, + "unknown_field": "hello", + "resources": [ + { + "mode": "managed", + "type": "databricks_pipeline", + "name": "test_pipeline", + "provider": "provider[\"registry.terraform.io/databricks/databricks\"]", + "instances": [ + { + "schema_version": 0, + "attributes": { + "allow_duplicate_names": false, + "catalog": null, + "channel": "CURRENT", + "cluster": [], + "random_field": "random_value", + "configuration": { + "bundle.sourcePath": 
"/Workspace//Users/user/.bundle/test/dev/files/src" + }, + "continuous": false, + "development": true, + "edition": "ADVANCED", + "filters": [], + "id": "123", + "library": [], + "name": "test_pipeline", + "notification": [], + "photon": false, + "serverless": false, + "storage": "dbfs:/123456", + "target": "test_dev", + "timeouts": null, + "url": "https://test.com" + }, + "sensitive_attributes": [] + } + ] + } + ] + }`) + err = os.WriteFile(filepath.Join(cacheDir, TerraformStateFileName), data, os.ModePerm) + assert.NoError(t, err) + state, err := ParseResourcesState(ctx, b) + assert.NoError(t, err) + expected := &resourcesState{ + Version: 4, + Resources: []stateResource{ + { + Mode: "managed", + Type: "databricks_pipeline", + Name: "test_pipeline", + Instances: []stateResourceInstance{ + {Attributes: stateInstanceAttributes{ID: "123"}}, + }, + }, + }, + } + assert.Equal(t, expected, state) +} diff --git a/bundle/deploy/terraform/write.go b/bundle/deploy/terraform/write.go index b53f9069d..bee777ffe 100644 --- a/bundle/deploy/terraform/write.go +++ b/bundle/deploy/terraform/write.go @@ -7,6 +7,9 @@ import ( "path/filepath" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/internal/tf/schema" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" ) type write struct{} @@ -15,16 +18,24 @@ func (w *write) Name() string { return "terraform.Write" } -func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { +func (w *write) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { dir, err := Dir(ctx, b) if err != nil { - return err + return diag.FromErr(err) } - root := BundleToTerraform(&b.Config) - f, err := os.Create(filepath.Join(dir, "bundle.tf.json")) + var root *schema.Root + err = b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + root, err = BundleToTerraformWithDynValue(ctx, v) + return v, err + }) if err != nil { - return err + return diag.FromErr(err) + } + + f, err := os.Create(filepath.Join(dir, TerraformConfigFileName)) + if err != nil { + return diag.FromErr(err) } defer f.Close() @@ -33,7 +44,7 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error { enc.SetIndent("", " ") err = enc.Encode(root) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/env/includes.go b/bundle/env/includes.go deleted file mode 100644 index 4ade01877..000000000 --- a/bundle/env/includes.go +++ /dev/null @@ -1,14 +0,0 @@ -package env - -import "context" - -// IncludesVariable names the environment variable that holds additional configuration paths to include -// during bundle configuration loading. Also see `bundle/config/mutator/process_root_includes.go`. -const IncludesVariable = "DATABRICKS_BUNDLE_INCLUDES" - -// Includes returns the bundle Includes environment variable. 
-func Includes(ctx context.Context) (string, bool) { - return get(ctx, []string{ - IncludesVariable, - }) -} diff --git a/bundle/env/includes_test.go b/bundle/env/includes_test.go deleted file mode 100644 index d9366a59f..000000000 --- a/bundle/env/includes_test.go +++ /dev/null @@ -1,28 +0,0 @@ -package env - -import ( - "context" - "testing" - - "github.com/databricks/cli/internal/testutil" - "github.com/stretchr/testify/assert" -) - -func TestIncludes(t *testing.T) { - ctx := context.Background() - - testutil.CleanupEnvironment(t) - - t.Run("set", func(t *testing.T) { - t.Setenv("DATABRICKS_BUNDLE_INCLUDES", "foo") - includes, ok := Includes(ctx) - assert.True(t, ok) - assert.Equal(t, "foo", includes) - }) - - t.Run("not set", func(t *testing.T) { - includes, ok := Includes(ctx) - assert.False(t, ok) - assert.Equal(t, "", includes) - }) -} diff --git a/bundle/if.go b/bundle/if.go new file mode 100644 index 000000000..bad1d72d2 --- /dev/null +++ b/bundle/if.go @@ -0,0 +1,40 @@ +package bundle + +import ( + "context" + + "github.com/databricks/cli/libs/diag" +) + +type ifMutator struct { + condition func(context.Context, *Bundle) (bool, error) + onTrueMutator Mutator + onFalseMutator Mutator +} + +func If( + condition func(context.Context, *Bundle) (bool, error), + onTrueMutator Mutator, + onFalseMutator Mutator, +) Mutator { + return &ifMutator{ + condition, onTrueMutator, onFalseMutator, + } +} + +func (m *ifMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + v, err := m.condition(ctx, b) + if err != nil { + return diag.FromErr(err) + } + + if v { + return Apply(ctx, b, m.onTrueMutator) + } else { + return Apply(ctx, b, m.onFalseMutator) + } +} + +func (m *ifMutator) Name() string { + return "If" +} diff --git a/bundle/if_test.go b/bundle/if_test.go new file mode 100644 index 000000000..b3fc0b9d9 --- /dev/null +++ b/bundle/if_test.go @@ -0,0 +1,53 @@ +package bundle + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestIfMutatorTrue(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return true, nil + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.NoError(t, diags.Error()) + + assert.Equal(t, 1, m1.applyCalled) + assert.Equal(t, 0, m2.applyCalled) +} + +func TestIfMutatorFalse(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return false, nil + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.NoError(t, diags.Error()) + + assert.Equal(t, 0, m1.applyCalled) + assert.Equal(t, 1, m2.applyCalled) +} + +func TestIfMutatorError(t *testing.T) { + m1 := &testMutator{} + m2 := &testMutator{} + ifMutator := If(func(context.Context, *Bundle) (bool, error) { + return true, assert.AnError + }, m1, m2) + + b := &Bundle{} + diags := Apply(context.Background(), b, ifMutator) + assert.Error(t, diags.Error()) + + assert.Equal(t, 0, m1.applyCalled) + assert.Equal(t, 0, m2.applyCalled) +} diff --git a/bundle/internal/bundletest/location.go b/bundle/internal/bundletest/location.go new file mode 100644 index 000000000..ebec43d30 --- /dev/null +++ b/bundle/internal/bundletest/location.go @@ -0,0 +1,34 @@ +package bundletest + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/dyn" +) + +// SetLocation sets the location of all values in the bundle to the given path. 
+// This is useful for testing where we need to associate configuration +// with the path it is loaded from. +func SetLocation(b *bundle.Bundle, prefix string, filePath string) { + start := dyn.MustPathFromString(prefix) + b.Config.Mutate(func(root dyn.Value) (dyn.Value, error) { + return dyn.Walk(root, func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + // If the path has the given prefix, set the location. + if p.HasPrefix(start) { + return v.WithLocations([]dyn.Location{{ + File: filePath, + }}), nil + } + + // The path is not nested under the given prefix. + // If the path is a prefix of the prefix, keep traversing and return the node verbatim. + if start.HasPrefix(p) { + return v, nil + } + + // Return verbatim, but skip traversal. + return v, dyn.ErrSkip + }) + }) + + b.Config.ConfigureConfigFilePath() +} diff --git a/bundle/internal/tf/codegen/go.mod b/bundle/internal/tf/codegen/go.mod index 7820cb705..67ac4bbc7 100644 --- a/bundle/internal/tf/codegen/go.mod +++ b/bundle/internal/tf/codegen/go.mod @@ -1,20 +1,24 @@ module github.com/databricks/cli/bundle/internal/tf/codegen -go 1.18 +go 1.21 require ( github.com/hashicorp/go-version v1.6.0 - github.com/hashicorp/hc-install v0.5.0 - github.com/hashicorp/terraform-exec v0.17.3 - github.com/hashicorp/terraform-json v0.15.0 - github.com/iancoleman/strcase v0.2.0 - github.com/zclconf/go-cty v1.12.1 - golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb + github.com/hashicorp/hc-install v0.6.3 + github.com/hashicorp/terraform-exec v0.20.0 + github.com/hashicorp/terraform-json v0.21.0 + github.com/iancoleman/strcase v0.3.0 + github.com/zclconf/go-cty v1.14.2 + golang.org/x/exp v0.0.0-20240213143201-ec583247a57a ) require ( + github.com/ProtonMail/go-crypto v1.1.0-alpha.0 // indirect + github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect + github.com/cloudflare/circl v1.3.7 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect - golang.org/x/crypto v0.17.0 // indirect - golang.org/x/mod v0.8.0 // indirect + golang.org/x/crypto v0.19.0 // indirect + golang.org/x/mod v0.15.0 // indirect + golang.org/x/sys v0.17.0 // indirect golang.org/x/text v0.14.0 // indirect ) diff --git a/bundle/internal/tf/codegen/go.sum b/bundle/internal/tf/codegen/go.sum index 3ebd90ccb..7a4023ba5 100644 --- a/bundle/internal/tf/codegen/go.sum +++ b/bundle/internal/tf/codegen/go.sum @@ -1,195 +1,68 @@ -github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/sprig/v3 v3.2.1/go.mod h1:UoaO7Yp8KlPnJIYWTFkMaqPUYKTfGFPhxNuwnnxkKlk= -github.com/Microsoft/go-winio v0.4.14/go.mod h1:qXqCSQ3Xa7+6tgxaGTIe4Kpcdsi+P8jBhyzoq1bpyYA= -github.com/Microsoft/go-winio v0.4.16 h1:FtSW/jqD+l4ba5iPBj9CODVtgfYAD8w2wS923g/cFDk= -github.com/Microsoft/go-winio v0.4.16/go.mod h1:XB6nPKklQyQ7GC9LdcBEcBl8PF76WugXOPRXwdLnMv0= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7 h1:YoJbenK9C67SkzkDfmQuVln04ygHj3vjZfd9FL+GmQQ= -github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/9nQmJSSwwds7ejkxaJwO37dru3geImFUdJlaLzQo= -github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= -github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apparentlymart/go-textseg v1.0.0/go.mod 
h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= -github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= -github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= -github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= +dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= +github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= +github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0 h1:nHGfwXmFvJrSR9xu8qL7BkO4DqTHXE9N5vPhgY2I+j0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.0/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= +github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= +github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= +github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= +github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= +github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= +github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emirpasic/gods v1.12.0 h1:QAUIPSaCu4G+POclxeqb3F+WPpdKqFGlw36+yOzGlrg= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= -github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-git/gcfg v1.5.0 h1:Q5ViNfGF8zFgyJWPqYwA7qGFoMTEiBmdlkcfRmpIMa4= -github.com/go-git/gcfg v1.5.0/go.mod h1:5m20vg6GwYabIxaOonVkTdrILxQMpEShl1xiMF4ua+E= -github.com/go-git/go-billy/v5 v5.2.0/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-billy/v5 v5.3.1 h1:CPiOUAzKtMRvolEKw+bG1PLRpT7D3LIs3/3ey4Aiu34= -github.com/go-git/go-billy/v5 v5.3.1/go.mod h1:pmpqyWchKfYfrkb/UVH4otLvyi/5gJlGI4Hb3ZqZ3W0= -github.com/go-git/go-git-fixtures/v4 v4.2.1/go.mod h1:K8zd3kDUAykwTdDCr+I0per6Y6vMiRR/nnVTBtavnB0= -github.com/go-git/go-git/v5 v5.4.2 h1:BXyZu9t0VkbiHtqrsvdq39UDhGJTl1h55VW6CSC4aY4= -github.com/go-git/go-git/v5 v5.4.2/go.mod h1:gQ1kArt6d+n+BGd+/B/I74HwRTLhth2+zti4ihgckDc= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= -github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= 
-github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= -github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= -github.com/hashicorp/go-checkpoint v0.5.0/go.mod h1:7nfLNL10NsxqO4iWuW6tWW0HjZuDrwkBuEQsVcpCOgg= -github.com/hashicorp/go-cleanhttp v0.5.0/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= +github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI= +github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= +github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= +github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= +github.com/go-git/go-git/v5 v5.11.0 h1:XIZc1p+8YzypNr34itUfSvYJcv+eYdTnTvOZ2vD3cA4= +github.com/go-git/go-git/v5 v5.11.0/go.mod h1:6GFcX2P3NM7FPBfpePbpLd21XxsgdAt+lKqXmCUiUCY= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE= +github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-multierror v1.1.1/go.mod h1:iw975J/qwKPdAO1clOe2L8331t/9/fmwbPZ6JB6eMoM= -github.com/hashicorp/go-uuid v1.0.0/go.mod h1:6SBZvOh/SIDV7/2o3Jml5SYk/TvGqwFJ/bN7x4byOro= github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.5.0 h1:D9bl4KayIYKEeJ4vUDe9L5huqxZXczKaykSRcmQ0xY0= -github.com/hashicorp/hc-install v0.5.0/go.mod h1:JyzMfbzfSBSjoDCRPna1vi/24BEDxFaCPfdHtM5SCdo= -github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.15.0 h1:/gIyNtR6SFw6h5yzlbDbACyGvIhKtQi8mTsbkNd79lE= -github.com/hashicorp/terraform-json v0.15.0/go.mod h1:+L1RNzjDU5leLFZkHTFTbJXaoqUC6TqXlFgDoOXrtvk= -github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= -github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= -github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= 
-github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/hashicorp/hc-install v0.6.3 h1:yE/r1yJvWbtrJ0STwScgEnCanb0U9v7zp0Gbkmcoxqs= +github.com/hashicorp/hc-install v0.6.3/go.mod h1:KamGdbodYzlufbWh4r9NRo8y6GLHWZP2GBtdnms1Ln0= +github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= +github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= +github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= +github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= +github.com/iancoleman/strcase v0.3.0 h1:nTXanmYxhfFAMjZL34Ov6gkzEsSJZ5DbhxWjvSASxEI= +github.com/iancoleman/strcase v0.3.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= -github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351 h1:DowS9hvgyYSX4TO5NpyC606/Z4SxnNYbT+WX27or6Ck= -github.com/kevinburke/ssh_config v0.0.0-20201106050909-4977a11b4351/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= -github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= -github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= -github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= -github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= -github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= -github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= -github.com/mitchellh/cli v1.1.5/go.mod h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= -github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= -github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= -github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= -github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= 
-github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4= +github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= +github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4= +github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= -github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= -github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= -github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= -github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= -github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= -github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= -github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= -github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= -github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= -github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.12.1 h1:PcupnljUm9EIvbgSHQnHhUr3fO6oFmkOrvs2BAFNXXY= -github.com/zclconf/go-cty v1.12.1/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20200414173820-0848c9571904/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE= -golang.org/x/crypto v0.5.0/go.mod 
h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU= -golang.org/x/crypto v0.17.0 h1:r8bRNjWL3GshPW3gkd+RpvzWrZAwPS49OmTGZ/uhM4k= -golang.org/x/crypto v0.17.0/go.mod h1:gCAAfMLgwOJRpTjQ2zCCt2OcSfYMTeZVSRtQlPC7Nq4= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb h1:PaBZQdo+iSDyHT053FjUCgZQ/9uqVwPOcl7KSWhKn6w= -golang.org/x/exp v0.0.0-20230213192124-5e25df0256eb/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA= -golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw= -golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws= -golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210502180810-71e4cd670f79/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod 
h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18= -golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= -golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= -golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k= -golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= +github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= +github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= +github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= +github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= +github.com/zclconf/go-cty v1.14.2 h1:kTG7lqmBou0Zkx35r6HJHUQTvaRPr5bIAf3AoHS0izI= +github.com/zclconf/go-cty v1.14.2/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= +golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a h1:HinSgX1tJRX3KsL//Gxynpw5CTOAIPhgL4W8PNiIpVE= +golang.org/x/exp v0.0.0-20240213143201-ec583247a57a/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= +golang.org/x/mod v0.15.0 h1:SernR4v+D55NyBH2QiEQrlBAnj1ECL6AGrA5+dPaMY8= +golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/net v0.19.0 h1:zTwKpTd2XuCqf8huc7Fo2iSy+4RHPd10s4KzeTnVr1c= +golang.org/x/net v0.19.0/go.mod h1:CfAk/cbD4CthTvqiEl8NpboMuiuOYsAr/7NOjZJtv1U= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= -golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= 
-gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +golang.org/x/tools v0.18.0 h1:k8NLag8AGHnn+PHbl7g43CtqZAwG60vZkLqgyZgIHgQ= +golang.org/x/tools v0.18.0/go.mod h1:GL7B4CwcLLeo59yx/9UWWuNOW1n3VZ4f5axWfML7Lcg= gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= diff --git a/bundle/internal/tf/codegen/schema/schema.go b/bundle/internal/tf/codegen/schema/schema.go index 534da4a02..f94b94f04 100644 --- a/bundle/internal/tf/codegen/schema/schema.go +++ b/bundle/internal/tf/codegen/schema/schema.go @@ -2,6 +2,8 @@ package schema import ( "context" + "errors" + "io/fs" "os" "path/filepath" @@ -41,7 +43,7 @@ func Load(ctx context.Context) (*tfjson.ProviderSchema, error) { } // Generate schema file if it doesn't exist. - if _, err := os.Stat(s.ProviderSchemaFile); os.IsNotExist(err) { + if _, err := os.Stat(s.ProviderSchemaFile); errors.Is(err, fs.ErrNotExist) { err = s.Generate(ctx) if err != nil { return nil, err diff --git a/bundle/internal/tf/codegen/schema/version.go b/bundle/internal/tf/codegen/schema/version.go index d141592a8..a99f15a40 100644 --- a/bundle/internal/tf/codegen/schema/version.go +++ b/bundle/internal/tf/codegen/schema/version.go @@ -1,3 +1,3 @@ package schema -const ProviderVersion = "1.31.1" +const ProviderVersion = "1.48.0" diff --git a/bundle/internal/tf/codegen/templates/data_sources.go.tmpl b/bundle/internal/tf/codegen/templates/data_sources.go.tmpl index 21baf33ea..9d998d497 100644 --- a/bundle/internal/tf/codegen/templates/data_sources.go.tmpl +++ b/bundle/internal/tf/codegen/templates/data_sources.go.tmpl @@ -4,14 +4,14 @@ package schema type DataSources struct { {{- range .Blocks }} - {{ .FieldName }} map[string]*{{ .TypeName }} `json:"{{ .TerraformName }},omitempty"` + {{ .FieldName }} map[string]any `json:"{{ .TerraformName }},omitempty"` {{- end }} } func NewDataSources() *DataSources { return &DataSources{ {{- range .Blocks }} - {{ .FieldName }}: make(map[string]*{{ .TypeName }}), + {{ .FieldName }}: make(map[string]any), {{- end }} } } diff --git a/bundle/internal/tf/codegen/templates/resources.go.tmpl b/bundle/internal/tf/codegen/templates/resources.go.tmpl index d18151871..91407a00e 100644 --- a/bundle/internal/tf/codegen/templates/resources.go.tmpl +++ b/bundle/internal/tf/codegen/templates/resources.go.tmpl @@ -4,14 +4,14 @@ package schema type Resources struct { {{- range .Blocks }} - {{ .FieldName }} map[string]*{{ .TypeName }} `json:"{{ .TerraformName }},omitempty"` + {{ .FieldName }} map[string]any `json:"{{ .TerraformName }},omitempty"` {{- end }} } func NewResources() *Resources { return &Resources{ {{- range .Blocks }} - 
{{ .FieldName }}: make(map[string]*{{ .TypeName }}), + {{ .FieldName }}: make(map[string]any), {{- end }} } } diff --git a/bundle/internal/tf/codegen/templates/root.go.tmpl b/bundle/internal/tf/codegen/templates/root.go.tmpl index 57fa71299..e03e978f0 100644 --- a/bundle/internal/tf/codegen/templates/root.go.tmpl +++ b/bundle/internal/tf/codegen/templates/root.go.tmpl @@ -19,13 +19,17 @@ type Root struct { Resource *Resources `json:"resource,omitempty"` } +const ProviderHost = "registry.terraform.io" +const ProviderSource = "databricks/databricks" +const ProviderVersion = "{{ .ProviderVersion }}" + func NewRoot() *Root { return &Root{ Terraform: map[string]interface{}{ "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ - "source": "databricks/databricks", - "version": "{{ .ProviderVersion }}", + "source": ProviderSource, + "version": ProviderVersion, }, }, }, diff --git a/bundle/internal/tf/schema/config.go b/bundle/internal/tf/schema/config.go index d24d57339..a2de987ec 100644 --- a/bundle/internal/tf/schema/config.go +++ b/bundle/internal/tf/schema/config.go @@ -28,6 +28,7 @@ type Config struct { Profile string `json:"profile,omitempty"` RateLimit int `json:"rate_limit,omitempty"` RetryTimeoutSeconds int `json:"retry_timeout_seconds,omitempty"` + ServerlessComputeId string `json:"serverless_compute_id,omitempty"` SkipVerify bool `json:"skip_verify,omitempty"` Token string `json:"token,omitempty"` Username string `json:"username,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go index 4886a9098..d639c82a8 100644 --- a/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go +++ b/bundle/internal/tf/schema/data_source_aws_crossaccount_policy.go @@ -3,7 +3,12 @@ package schema type DataSourceAwsCrossaccountPolicy struct { - Id string `json:"id,omitempty"` - Json string `json:"json,omitempty"` - PassRoles []string `json:"pass_roles,omitempty"` + AwsAccountId string `json:"aws_account_id,omitempty"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + PassRoles []string `json:"pass_roles,omitempty"` + PolicyType string `json:"policy_type,omitempty"` + Region string `json:"region,omitempty"` + SecurityGroupId string `json:"security_group_id,omitempty"` + VpcId string `json:"vpc_id,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go new file mode 100644 index 000000000..14d5c169d --- /dev/null +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_assume_role_policy.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceAwsUnityCatalogAssumeRolePolicy struct { + AwsAccountId string `json:"aws_account_id"` + ExternalId string `json:"external_id"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + RoleName string `json:"role_name"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go new file mode 100644 index 000000000..2832bdf72 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_aws_unity_catalog_policy.go @@ -0,0 +1,12 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceAwsUnityCatalogPolicy struct { + AwsAccountId string `json:"aws_account_id"` + BucketName string `json:"bucket_name"` + Id string `json:"id,omitempty"` + Json string `json:"json,omitempty"` + KmsName string `json:"kms_name,omitempty"` + RoleName string `json:"role_name"` +} diff --git a/bundle/internal/tf/schema/data_source_catalog.go b/bundle/internal/tf/schema/data_source_catalog.go new file mode 100644 index 000000000..6f9237cfa --- /dev/null +++ b/bundle/internal/tf/schema/data_source_catalog.go @@ -0,0 +1,46 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag struct { + InheritedFromName string `json:"inherited_from_name,omitempty"` + InheritedFromType string `json:"inherited_from_type,omitempty"` + Value string `json:"value"` +} + +type DataSourceCatalogCatalogInfoProvisioningInfo struct { + State string `json:"state,omitempty"` +} + +type DataSourceCatalogCatalogInfo struct { + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogType string `json:"catalog_type,omitempty"` + Comment string `json:"comment,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + FullName string `json:"full_name,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + SecurableKind string `json:"securable_kind,omitempty"` + SecurableType string `json:"securable_type,omitempty"` + ShareName string `json:"share_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + EffectivePredictiveOptimizationFlag *DataSourceCatalogCatalogInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + ProvisioningInfo *DataSourceCatalogCatalogInfoProvisioningInfo `json:"provisioning_info,omitempty"` +} + +type DataSourceCatalog struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + CatalogInfo *DataSourceCatalogCatalogInfo `json:"catalog_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_cluster.go b/bundle/internal/tf/schema/data_source_cluster.go index ce1ad034c..fff66dc93 100644 --- a/bundle/internal/tf/schema/data_source_cluster.go +++ b/bundle/internal/tf/schema/data_source_cluster.go @@ -96,7 +96,7 @@ type DataSourceClusterClusterInfoGcpAttributes struct { } type DataSourceClusterClusterInfoInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsDbfs struct { @@ -104,11 +104,11 @@ type DataSourceClusterClusterInfoInitScriptsDbfs struct { } type DataSourceClusterClusterInfoInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + 
Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsS3 struct { @@ -122,11 +122,11 @@ type DataSourceClusterClusterInfoInitScriptsS3 struct { } type DataSourceClusterClusterInfoInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceClusterClusterInfoInitScripts struct { diff --git a/bundle/internal/tf/schema/data_source_current_metastore.go b/bundle/internal/tf/schema/data_source_current_metastore.go new file mode 100644 index 000000000..11e647fd3 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_current_metastore.go @@ -0,0 +1,29 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceCurrentMetastoreMetastoreInfo struct { + Cloud string `json:"cloud,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DefaultDataAccessConfigId string `json:"default_data_access_config_id,omitempty"` + DeltaSharingOrganizationName string `json:"delta_sharing_organization_name,omitempty"` + DeltaSharingRecipientTokenLifetimeInSeconds int `json:"delta_sharing_recipient_token_lifetime_in_seconds,omitempty"` + DeltaSharingScope string `json:"delta_sharing_scope,omitempty"` + GlobalMetastoreId string `json:"global_metastore_id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + PrivilegeModelVersion string `json:"privilege_model_version,omitempty"` + Region string `json:"region,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` + StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` + StorageRootCredentialName string `json:"storage_root_credential_name,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` +} + +type DataSourceCurrentMetastore struct { + Id string `json:"id,omitempty"` + MetastoreInfo *DataSourceCurrentMetastoreMetastoreInfo `json:"metastore_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_directory.go b/bundle/internal/tf/schema/data_source_directory.go index 6841b6074..555c8d756 100644 --- a/bundle/internal/tf/schema/data_source_directory.go +++ b/bundle/internal/tf/schema/data_source_directory.go @@ -3,7 +3,8 @@ package schema type DataSourceDirectory struct { - Id string `json:"id,omitempty"` - ObjectId int `json:"object_id,omitempty"` - Path string `json:"path"` + Id string `json:"id,omitempty"` + ObjectId int `json:"object_id,omitempty"` + Path string `json:"path"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_external_location.go b/bundle/internal/tf/schema/data_source_external_location.go new file mode 100644 index 000000000..a3e78cbd3 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_external_location.go @@ -0,0 +1,37 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails struct { + Algorithm string `json:"algorithm,omitempty"` + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` +} + +type DataSourceExternalLocationExternalLocationInfoEncryptionDetails struct { + SseEncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"` +} + +type DataSourceExternalLocationExternalLocationInfo struct { + AccessPoint string `json:"access_point,omitempty"` + BrowseOnly bool `json:"browse_only,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + CredentialId string `json:"credential_id,omitempty"` + CredentialName string `json:"credential_name,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + Url string `json:"url,omitempty"` + EncryptionDetails *DataSourceExternalLocationExternalLocationInfoEncryptionDetails `json:"encryption_details,omitempty"` +} + +type DataSourceExternalLocation struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + ExternalLocationInfo *DataSourceExternalLocationExternalLocationInfo `json:"external_location_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_external_locations.go b/bundle/internal/tf/schema/data_source_external_locations.go new file mode 100644 index 000000000..05b7b59c3 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_external_locations.go @@ -0,0 +1,8 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type DataSourceExternalLocations struct { + Id string `json:"id,omitempty"` + Names []string `json:"names,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_instance_pool.go b/bundle/internal/tf/schema/data_source_instance_pool.go index 240083d64..796d764b7 100644 --- a/bundle/internal/tf/schema/data_source_instance_pool.go +++ b/bundle/internal/tf/schema/data_source_instance_pool.go @@ -27,6 +27,7 @@ type DataSourceInstancePoolPoolInfoDiskSpec struct { type DataSourceInstancePoolPoolInfoGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"` + ZoneId string `json:"zone_id,omitempty"` } type DataSourceInstancePoolPoolInfoInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/data_source_job.go b/bundle/internal/tf/schema/data_source_job.go index 75d3672bc..727848ced 100644 --- a/bundle/internal/tf/schema/data_source_job.go +++ b/bundle/internal/tf/schema/data_source_job.go @@ -2,15 +2,6 @@ package schema -type DataSourceJobJobSettingsSettingsComputeSpec struct { - Kind string `json:"kind,omitempty"` -} - -type DataSourceJobJobSettingsSettingsCompute struct { - ComputeKey string `json:"compute_key,omitempty"` - Spec *DataSourceJobJobSettingsSettingsComputeSpec `json:"spec,omitempty"` -} - type DataSourceJobJobSettingsSettingsContinuous struct { PauseStatus string `json:"pause_status,omitempty"` } @@ -21,6 +12,7 @@ type DataSourceJobJobSettingsSettingsDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -34,9 +26,20 @@ type DataSourceJobJobSettingsSettingsEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } +type DataSourceJobJobSettingsSettingsEnvironmentSpec struct { + Client string `json:"client"` + Dependencies []string `json:"dependencies,omitempty"` +} + +type DataSourceJobJobSettingsSettingsEnvironment struct { + EnvironmentKey string `json:"environment_key"` + Spec *DataSourceJobJobSettingsSettingsEnvironmentSpec `json:"spec,omitempty"` +} + type DataSourceJobJobSettingsSettingsGitSourceJobSource struct { DirtyState string `json:"dirty_state,omitempty"` ImportFromGitBranch string `json:"import_from_git_branch"` @@ -53,9 +56,9 @@ type DataSourceJobJobSettingsSettingsGitSource struct { } type DataSourceJobJobSettingsSettingsHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type DataSourceJobJobSettingsSettingsHealth struct { @@ -134,7 +137,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterGcpAttributes struct { } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs struct { @@ -142,11 +145,11 @@ type 
DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs struct } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct { @@ -160,11 +163,11 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScripts struct { @@ -220,7 +223,7 @@ type DataSourceJobJobSettingsSettingsJobClusterNewCluster struct { } type DataSourceJobJobSettingsSettingsJobCluster struct { - JobClusterKey string `json:"job_cluster_key,omitempty"` + JobClusterKey string `json:"job_cluster_key"` NewCluster *DataSourceJobJobSettingsSettingsJobClusterNewCluster `json:"new_cluster,omitempty"` } @@ -241,12 +244,13 @@ type DataSourceJobJobSettingsSettingsLibraryPypi struct { } type DataSourceJobJobSettingsSettingsLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"` - Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"` - Pypi *DataSourceJobJobSettingsSettingsLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsLibraryPypi `json:"pypi,omitempty"` } type DataSourceJobJobSettingsSettingsNewClusterAutoscale struct { @@ -321,7 +325,7 @@ type DataSourceJobJobSettingsSettingsNewClusterGcpAttributes struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs struct { @@ -329,11 +333,11 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct { @@ -347,11 +351,11 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct { - Destination string 
`json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsNewClusterInitScripts struct { @@ -410,6 +414,7 @@ type DataSourceJobJobSettingsSettingsNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type DataSourceJobJobSettingsSettingsNotificationSettings struct { @@ -418,8 +423,8 @@ type DataSourceJobJobSettingsSettingsNotificationSettings struct { } type DataSourceJobJobSettingsSettingsParameter struct { - Default string `json:"default,omitempty"` - Name string `json:"name,omitempty"` + Default string `json:"default"` + Name string `json:"name"` } type DataSourceJobJobSettingsSettingsPipelineTask struct { @@ -471,9 +476,9 @@ type DataSourceJobJobSettingsSettingsSparkSubmitTask struct { } type DataSourceJobJobSettingsSettingsTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type DataSourceJobJobSettingsSettingsTaskDbtTask struct { @@ -482,6 +487,7 @@ type DataSourceJobJobSettingsSettingsTaskDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -491,16 +497,392 @@ type DataSourceJobJobSettingsSettingsTaskDependsOn struct { } type DataSourceJobJobSettingsSettingsTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask struct { + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask struct { + Catalog string `json:"catalog,omitempty"` + Commands []string `json:"commands"` + ProfilesDirectory string `json:"profiles_directory,omitempty"` + ProjectDirectory string `json:"project_directory,omitempty"` + Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDependsOn struct { + Outcome string `json:"outcome,omitempty"` + TaskKey string `json:"task_key"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules struct { + Metric string `json:"metric"` + Op string `json:"op"` + Value int 
`json:"value"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth struct { + Rules []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealthRules `json:"rules,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAutoscale struct { + MaxWorkers int `json:"max_workers,omitempty"` + MinWorkers int `json:"min_workers,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAwsAttributes struct { + Availability string `json:"availability,omitempty"` + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeType string `json:"ebs_volume_type,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAzureAttributes struct { + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConf struct { + Dbfs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"` + S3 *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct { + MountOptions string `json:"mount_options,omitempty"` + ServerAddress string `json:"server_address"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfo struct { + LocalMountDirPath string `json:"local_mount_dir_path"` + RemoteMountDirPath string 
`json:"remote_mount_dir_path,omitempty"` + NetworkFilesystemInfo *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo `json:"network_filesystem_info,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImageBasicAuth struct { + Password string `json:"password"` + Username string `json:"username"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImage struct { + Url string `json:"url"` + BasicAuth *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImageBasicAuth `json:"basic_auth,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterGcpAttributes struct { + Availability string `json:"availability,omitempty"` + BootDiskSize int `json:"boot_disk_size,omitempty"` + GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsAbfss struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsDbfs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsFile struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsGcs struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsWorkspace struct { + Destination string `json:"destination"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScripts struct { + Abfss *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"` + Dbfs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"` + File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsFile `json:"file,omitempty"` + Gcs *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` + S3 *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` + Workspace *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { + Jobs bool `json:"jobs,omitempty"` + Notebooks bool `json:"notebooks,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadType struct { + Clients 
*DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadTypeClients `json:"clients,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewCluster struct { + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + DataSecurityMode string `json:"data_security_mode,omitempty"` + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + IdempotencyToken string `json:"idempotency_token,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + NumWorkers int `json:"num_workers"` + PolicyId string `json:"policy_id,omitempty"` + RuntimeEngine string `json:"runtime_engine,omitempty"` + SingleUserName string `json:"single_user_name,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + SparkVersion string `json:"spark_version"` + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + Autoscale *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` + AwsAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` + ClusterLogConf *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterLogConf `json:"cluster_log_conf,omitempty"` + ClusterMountInfo []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterClusterMountInfo `json:"cluster_mount_info,omitempty"` + DockerImage *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` + GcpAttributes *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` + InitScripts []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + WorkloadType *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask struct { + BaseParameters map[string]string `json:"base_parameters,omitempty"` + NotebookPath string `json:"notebook_path"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPipelineTask struct { + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPythonWheelTask struct { + EntryPoint string `json:"entry_point,omitempty"` + NamedParameters map[string]string 
`json:"named_parameters,omitempty"` + PackageName string `json:"package_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskRunJobTask struct { + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkJarTask struct { + JarUri string `json:"jar_uri,omitempty"` + MainClassName string `json:"main_class_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask struct { + Parameters []string `json:"parameters,omitempty"` + PythonFile string `json:"python_file"` + Source string `json:"source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask struct { + Parameters []string `json:"parameters,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlertSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert struct { + AlertId string `json:"alert_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlertSubscriptions `json:"subscriptions,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboardSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard struct { + CustomSubject string `json:"custom_subject,omitempty"` + DashboardId string `json:"dashboard_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboardSubscriptions `json:"subscriptions,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile struct { + Path string `json:"path"` + Source string `json:"source,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery struct { + QueryId string `json:"query_id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask struct { + Parameters map[string]string `json:"parameters,omitempty"` + WarehouseId string `json:"warehouse_id"` + Alert *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` + Dashboard *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` + File *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` + Query *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTaskQuery `json:"query,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure struct { + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart struct { + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id"` +} + +type 
DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` + OnSuccess []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTaskTask struct { + Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []DataSourceJobJobSettingsSettingsTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *DataSourceJobJobSettingsSettingsTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` +} + +type DataSourceJobJobSettingsSettingsTaskForEachTask struct { + Concurrency int `json:"concurrency,omitempty"` + Inputs string `json:"inputs"` + Task 
*DataSourceJobJobSettingsSettingsTaskForEachTaskTask `json:"task,omitempty"` +} + type DataSourceJobJobSettingsSettingsTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type DataSourceJobJobSettingsSettingsTaskHealth struct { @@ -524,12 +906,13 @@ type DataSourceJobJobSettingsSettingsTaskLibraryPypi struct { } type DataSourceJobJobSettingsSettingsTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"` - Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"` - Pypi *DataSourceJobJobSettingsSettingsTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *DataSourceJobJobSettingsSettingsTaskLibraryCran `json:"cran,omitempty"` + Maven *DataSourceJobJobSettingsSettingsTaskLibraryMaven `json:"maven,omitempty"` + Pypi *DataSourceJobJobSettingsSettingsTaskLibraryPypi `json:"pypi,omitempty"` } type DataSourceJobJobSettingsSettingsTaskNewClusterAutoscale struct { @@ -604,7 +987,7 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterGcpAttributes struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs struct { @@ -612,11 +995,11 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct { @@ -630,11 +1013,11 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct { } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type DataSourceJobJobSettingsSettingsTaskNewClusterInitScripts struct { @@ -693,6 +1076,7 @@ type DataSourceJobJobSettingsSettingsTaskNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type DataSourceJobJobSettingsSettingsTaskNotificationSettings struct { @@ -758,7 +1142,8 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard struct { } type DataSourceJobJobSettingsSettingsTaskSqlTaskFile struct { - Path string `json:"path"` + Path string `json:"path"` + Source string `json:"source,omitempty"` } type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct { @@ -767,7 +1152,7 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct { type 
DataSourceJobJobSettingsSettingsTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"` @@ -775,43 +1160,49 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct { } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } type DataSourceJobJobSettingsSettingsTask struct { - ComputeKey string `json:"compute_key,omitempty"` Description string `json:"description,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` ExistingClusterId string `json:"existing_cluster_id,omitempty"` JobClusterKey string `json:"job_cluster_key,omitempty"` MaxRetries int `json:"max_retries,omitempty"` MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` + TaskKey string `json:"task_key"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` ConditionTask *DataSourceJobJobSettingsSettingsTaskConditionTask `json:"condition_task,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"` DependsOn []DataSourceJobJobSettingsSettingsTaskDependsOn `json:"depends_on,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *DataSourceJobJobSettingsSettingsTaskForEachTask `json:"for_each_task,omitempty"` Health *DataSourceJobJobSettingsSettingsTaskHealth `json:"health,omitempty"` Library []DataSourceJobJobSettingsSettingsTaskLibrary `json:"library,omitempty"` NewCluster *DataSourceJobJobSettingsSettingsTaskNewCluster `json:"new_cluster,omitempty"` @@ -833,31 +1224,44 @@ type DataSourceJobJobSettingsSettingsTriggerFileArrival struct { WaitAfterLastChangeSeconds int 
`json:"wait_after_last_change_seconds,omitempty"` } +type DataSourceJobJobSettingsSettingsTriggerTableUpdate struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + type DataSourceJobJobSettingsSettingsTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *DataSourceJobJobSettingsSettingsTriggerFileArrival `json:"file_arrival,omitempty"` + TableUpdate *DataSourceJobJobSettingsSettingsTriggerTableUpdate `json:"table_update,omitempty"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` +} + +type DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type DataSourceJobJobSettingsSettingsWebhookNotifications struct { OnDurationWarningThresholdExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []DataSourceJobJobSettingsSettingsWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []DataSourceJobJobSettingsSettingsWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -873,11 +1277,11 @@ type DataSourceJobJobSettingsSettings struct { RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` Tags map[string]string `json:"tags,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` - Compute []DataSourceJobJobSettingsSettingsCompute `json:"compute,omitempty"` Continuous *DataSourceJobJobSettingsSettingsContinuous `json:"continuous,omitempty"` DbtTask *DataSourceJobJobSettingsSettingsDbtTask `json:"dbt_task,omitempty"` Deployment *DataSourceJobJobSettingsSettingsDeployment `json:"deployment,omitempty"` EmailNotifications *DataSourceJobJobSettingsSettingsEmailNotifications `json:"email_notifications,omitempty"` + Environment []DataSourceJobJobSettingsSettingsEnvironment `json:"environment,omitempty"` GitSource *DataSourceJobJobSettingsSettingsGitSource `json:"git_source,omitempty"` Health *DataSourceJobJobSettingsSettingsHealth `json:"health,omitempty"` JobCluster []DataSourceJobJobSettingsSettingsJobCluster `json:"job_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/data_source_metastore.go b/bundle/internal/tf/schema/data_source_metastore.go index dd14be81c..ce2064794 100644 --- a/bundle/internal/tf/schema/data_source_metastore.go +++ b/bundle/internal/tf/schema/data_source_metastore.go @@ -25,6 +25,8 @@ type DataSourceMetastoreMetastoreInfo struct { type DataSourceMetastore struct { Id string `json:"id,omitempty"` - MetastoreId string `json:"metastore_id"` + MetastoreId string 
`json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Region string `json:"region,omitempty"` MetastoreInfo *DataSourceMetastoreMetastoreInfo `json:"metastore_info,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_mlflow_experiment.go b/bundle/internal/tf/schema/data_source_mlflow_experiment.go new file mode 100644 index 000000000..979130c5f --- /dev/null +++ b/bundle/internal/tf/schema/data_source_mlflow_experiment.go @@ -0,0 +1,19 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceMlflowExperimentTags struct { + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` +} + +type DataSourceMlflowExperiment struct { + ArtifactLocation string `json:"artifact_location,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + ExperimentId string `json:"experiment_id,omitempty"` + Id string `json:"id,omitempty"` + LastUpdateTime int `json:"last_update_time,omitempty"` + LifecycleStage string `json:"lifecycle_stage,omitempty"` + Name string `json:"name,omitempty"` + Tags []DataSourceMlflowExperimentTags `json:"tags,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_sql_warehouse.go b/bundle/internal/tf/schema/data_source_sql_warehouse.go index 218591d09..05212f0bd 100644 --- a/bundle/internal/tf/schema/data_source_sql_warehouse.go +++ b/bundle/internal/tf/schema/data_source_sql_warehouse.go @@ -3,20 +3,34 @@ package schema type DataSourceSqlWarehouseChannel struct { - Name string `json:"name,omitempty"` + DbsqlVersion string `json:"dbsql_version,omitempty"` + Name string `json:"name,omitempty"` +} + +type DataSourceSqlWarehouseHealthFailureReason struct { + Code string `json:"code,omitempty"` + Parameters map[string]string `json:"parameters,omitempty"` + Type string `json:"type,omitempty"` +} + +type DataSourceSqlWarehouseHealth struct { + Details string `json:"details,omitempty"` + Message string `json:"message,omitempty"` + Status string `json:"status,omitempty"` + Summary string `json:"summary,omitempty"` + FailureReason *DataSourceSqlWarehouseHealthFailureReason `json:"failure_reason,omitempty"` } type DataSourceSqlWarehouseOdbcParams struct { - Host string `json:"host,omitempty"` Hostname string `json:"hostname,omitempty"` - Path string `json:"path"` - Port int `json:"port"` - Protocol string `json:"protocol"` + Path string `json:"path,omitempty"` + Port int `json:"port,omitempty"` + Protocol string `json:"protocol,omitempty"` } type DataSourceSqlWarehouseTagsCustomTags struct { - Key string `json:"key"` - Value string `json:"value"` + Key string `json:"key,omitempty"` + Value string `json:"value,omitempty"` } type DataSourceSqlWarehouseTags struct { @@ -26,6 +40,7 @@ type DataSourceSqlWarehouseTags struct { type DataSourceSqlWarehouse struct { AutoStopMins int `json:"auto_stop_mins,omitempty"` ClusterSize string `json:"cluster_size,omitempty"` + CreatorName string `json:"creator_name,omitempty"` DataSourceId string `json:"data_source_id,omitempty"` EnablePhoton bool `json:"enable_photon,omitempty"` EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` @@ -35,10 +50,13 @@ type DataSourceSqlWarehouse struct { MaxNumClusters int `json:"max_num_clusters,omitempty"` MinNumClusters int `json:"min_num_clusters,omitempty"` Name string `json:"name,omitempty"` + NumActiveSessions int `json:"num_active_sessions,omitempty"` NumClusters int `json:"num_clusters,omitempty"` SpotInstancePolicy string `json:"spot_instance_policy,omitempty"` State string 
`json:"state,omitempty"` + WarehouseType string `json:"warehouse_type,omitempty"` Channel *DataSourceSqlWarehouseChannel `json:"channel,omitempty"` + Health *DataSourceSqlWarehouseHealth `json:"health,omitempty"` OdbcParams *DataSourceSqlWarehouseOdbcParams `json:"odbc_params,omitempty"` Tags *DataSourceSqlWarehouseTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/data_source_storage_credential.go b/bundle/internal/tf/schema/data_source_storage_credential.go new file mode 100644 index 000000000..bf58f2726 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_storage_credential.go @@ -0,0 +1,58 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceStorageCredentialStorageCredentialInfoAwsIamRole struct { + ExternalId string `json:"external_id,omitempty"` + RoleArn string `json:"role_arn"` + UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfoAzureManagedIdentity struct { + AccessConnectorId string `json:"access_connector_id"` + CredentialId string `json:"credential_id,omitempty"` + ManagedIdentityId string `json:"managed_identity_id,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfoAzureServicePrincipal struct { + ApplicationId string `json:"application_id"` + ClientSecret string `json:"client_secret"` + DirectoryId string `json:"directory_id"` +} + +type DataSourceStorageCredentialStorageCredentialInfoCloudflareApiToken struct { + AccessKeyId string `json:"access_key_id"` + AccountId string `json:"account_id"` + SecretAccessKey string `json:"secret_access_key"` +} + +type DataSourceStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount struct { + CredentialId string `json:"credential_id,omitempty"` + Email string `json:"email,omitempty"` +} + +type DataSourceStorageCredentialStorageCredentialInfo struct { + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + ReadOnly bool `json:"read_only,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` + AwsIamRole *DataSourceStorageCredentialStorageCredentialInfoAwsIamRole `json:"aws_iam_role,omitempty"` + AzureManagedIdentity *DataSourceStorageCredentialStorageCredentialInfoAzureManagedIdentity `json:"azure_managed_identity,omitempty"` + AzureServicePrincipal *DataSourceStorageCredentialStorageCredentialInfoAzureServicePrincipal `json:"azure_service_principal,omitempty"` + CloudflareApiToken *DataSourceStorageCredentialStorageCredentialInfoCloudflareApiToken `json:"cloudflare_api_token,omitempty"` + DatabricksGcpServiceAccount *DataSourceStorageCredentialStorageCredentialInfoDatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` +} + +type DataSourceStorageCredential struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + StorageCredentialInfo *DataSourceStorageCredentialStorageCredentialInfo `json:"storage_credential_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_storage_credentials.go b/bundle/internal/tf/schema/data_source_storage_credentials.go new file mode 100644 index 
000000000..153def357 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_storage_credentials.go @@ -0,0 +1,8 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceStorageCredentials struct { + Id string `json:"id,omitempty"` + Names []string `json:"names,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_table.go b/bundle/internal/tf/schema/data_source_table.go new file mode 100644 index 000000000..f59959696 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_table.go @@ -0,0 +1,127 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type DataSourceTableTableInfoColumnsMask struct { + FunctionName string `json:"function_name,omitempty"` + UsingColumnNames []string `json:"using_column_names,omitempty"` +} + +type DataSourceTableTableInfoColumns struct { + Comment string `json:"comment,omitempty"` + Name string `json:"name,omitempty"` + Nullable bool `json:"nullable,omitempty"` + PartitionIndex int `json:"partition_index,omitempty"` + Position int `json:"position,omitempty"` + TypeIntervalType string `json:"type_interval_type,omitempty"` + TypeJson string `json:"type_json,omitempty"` + TypeName string `json:"type_name,omitempty"` + TypePrecision int `json:"type_precision,omitempty"` + TypeScale int `json:"type_scale,omitempty"` + TypeText string `json:"type_text,omitempty"` + Mask *DataSourceTableTableInfoColumnsMask `json:"mask,omitempty"` +} + +type DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs struct { + DeltaRuntimeProperties map[string]string `json:"delta_runtime_properties"` +} + +type DataSourceTableTableInfoEffectivePredictiveOptimizationFlag struct { + InheritedFromName string `json:"inherited_from_name,omitempty"` + InheritedFromType string `json:"inherited_from_type,omitempty"` + Value string `json:"value"` +} + +type DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails struct { + Algorithm string `json:"algorithm,omitempty"` + AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` +} + +type DataSourceTableTableInfoEncryptionDetails struct { + SseEncryptionDetails *DataSourceTableTableInfoEncryptionDetailsSseEncryptionDetails `json:"sse_encryption_details,omitempty"` +} + +type DataSourceTableTableInfoRowFilter struct { + FunctionName string `json:"function_name"` + InputColumnNames []string `json:"input_column_names"` +} + +type DataSourceTableTableInfoTableConstraintsForeignKeyConstraint struct { + ChildColumns []string `json:"child_columns"` + Name string `json:"name"` + ParentColumns []string `json:"parent_columns"` + ParentTable string `json:"parent_table"` +} + +type DataSourceTableTableInfoTableConstraintsNamedTableConstraint struct { + Name string `json:"name"` +} + +type DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint struct { + ChildColumns []string `json:"child_columns"` + Name string `json:"name"` +} + +type DataSourceTableTableInfoTableConstraints struct { + ForeignKeyConstraint *DataSourceTableTableInfoTableConstraintsForeignKeyConstraint `json:"foreign_key_constraint,omitempty"` + NamedTableConstraint *DataSourceTableTableInfoTableConstraintsNamedTableConstraint `json:"named_table_constraint,omitempty"` + PrimaryKeyConstraint *DataSourceTableTableInfoTableConstraintsPrimaryKeyConstraint `json:"primary_key_constraint,omitempty"` +} + +type DataSourceTableTableInfoViewDependenciesDependenciesFunction struct { + FunctionFullName string `json:"function_full_name"` +} + +type 
DataSourceTableTableInfoViewDependenciesDependenciesTable struct { + TableFullName string `json:"table_full_name"` +} + +type DataSourceTableTableInfoViewDependenciesDependencies struct { + Function *DataSourceTableTableInfoViewDependenciesDependenciesFunction `json:"function,omitempty"` + Table *DataSourceTableTableInfoViewDependenciesDependenciesTable `json:"table,omitempty"` +} + +type DataSourceTableTableInfoViewDependencies struct { + Dependencies []DataSourceTableTableInfoViewDependenciesDependencies `json:"dependencies,omitempty"` +} + +type DataSourceTableTableInfo struct { + AccessPoint string `json:"access_point,omitempty"` + BrowseOnly bool `json:"browse_only,omitempty"` + CatalogName string `json:"catalog_name,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataAccessConfigurationId string `json:"data_access_configuration_id,omitempty"` + DataSourceFormat string `json:"data_source_format,omitempty"` + DeletedAt int `json:"deleted_at,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + FullName string `json:"full_name,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name,omitempty"` + Owner string `json:"owner,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + SqlPath string `json:"sql_path,omitempty"` + StorageCredentialName string `json:"storage_credential_name,omitempty"` + StorageLocation string `json:"storage_location,omitempty"` + TableId string `json:"table_id,omitempty"` + TableType string `json:"table_type,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + ViewDefinition string `json:"view_definition,omitempty"` + Columns []DataSourceTableTableInfoColumns `json:"columns,omitempty"` + DeltaRuntimePropertiesKvpairs *DataSourceTableTableInfoDeltaRuntimePropertiesKvpairs `json:"delta_runtime_properties_kvpairs,omitempty"` + EffectivePredictiveOptimizationFlag *DataSourceTableTableInfoEffectivePredictiveOptimizationFlag `json:"effective_predictive_optimization_flag,omitempty"` + EncryptionDetails *DataSourceTableTableInfoEncryptionDetails `json:"encryption_details,omitempty"` + RowFilter *DataSourceTableTableInfoRowFilter `json:"row_filter,omitempty"` + TableConstraints []DataSourceTableTableInfoTableConstraints `json:"table_constraints,omitempty"` + ViewDependencies *DataSourceTableTableInfoViewDependencies `json:"view_dependencies,omitempty"` +} + +type DataSourceTable struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + TableInfo *DataSourceTableTableInfo `json:"table_info,omitempty"` +} diff --git a/bundle/internal/tf/schema/data_source_volumes.go b/bundle/internal/tf/schema/data_source_volumes.go new file mode 100644 index 000000000..07bf59338 --- /dev/null +++ b/bundle/internal/tf/schema/data_source_volumes.go @@ -0,0 +1,10 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
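The new lookup data sources above (storage credential, storage credentials, table) all follow the same pattern: a required name input, an optional id, and a computed info block held behind a pointer so it is omitted when unset. A small sketch of how such a struct serializes under that assumption; tableLookup is a trimmed, illustrative stand-in for the generated DataSourceTable, not the real type.

package main

import (
	"encoding/json"
	"fmt"
)

// tableLookup mirrors the tag shape of the generated DataSourceTable:
// only "name" is required; "id" and the nested info block are optional.
type tableLookup struct {
	Id        string         `json:"id,omitempty"`
	Name      string         `json:"name"`
	TableInfo map[string]any `json:"table_info,omitempty"`
}

func main() {
	// Only the required lookup key is set, so only "name" appears in the JSON.
	b, err := json.MarshalIndent(tableLookup{Name: "main.default.events"}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}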
+ +package schema + +type DataSourceVolumes struct { + CatalogName string `json:"catalog_name"` + Id string `json:"id,omitempty"` + Ids []string `json:"ids,omitempty"` + SchemaName string `json:"schema_name"` +} diff --git a/bundle/internal/tf/schema/data_sources.go b/bundle/internal/tf/schema/data_sources.go index c61ab9096..b68df2b40 100644 --- a/bundle/internal/tf/schema/data_sources.go +++ b/bundle/internal/tf/schema/data_sources.go @@ -3,85 +3,107 @@ package schema type DataSources struct { - AwsAssumeRolePolicy map[string]*DataSourceAwsAssumeRolePolicy `json:"databricks_aws_assume_role_policy,omitempty"` - AwsBucketPolicy map[string]*DataSourceAwsBucketPolicy `json:"databricks_aws_bucket_policy,omitempty"` - AwsCrossaccountPolicy map[string]*DataSourceAwsCrossaccountPolicy `json:"databricks_aws_crossaccount_policy,omitempty"` - Catalogs map[string]*DataSourceCatalogs `json:"databricks_catalogs,omitempty"` - Cluster map[string]*DataSourceCluster `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]*DataSourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` - Clusters map[string]*DataSourceClusters `json:"databricks_clusters,omitempty"` - CurrentConfig map[string]*DataSourceCurrentConfig `json:"databricks_current_config,omitempty"` - CurrentUser map[string]*DataSourceCurrentUser `json:"databricks_current_user,omitempty"` - DbfsFile map[string]*DataSourceDbfsFile `json:"databricks_dbfs_file,omitempty"` - DbfsFilePaths map[string]*DataSourceDbfsFilePaths `json:"databricks_dbfs_file_paths,omitempty"` - Directory map[string]*DataSourceDirectory `json:"databricks_directory,omitempty"` - Group map[string]*DataSourceGroup `json:"databricks_group,omitempty"` - InstancePool map[string]*DataSourceInstancePool `json:"databricks_instance_pool,omitempty"` - InstanceProfiles map[string]*DataSourceInstanceProfiles `json:"databricks_instance_profiles,omitempty"` - Job map[string]*DataSourceJob `json:"databricks_job,omitempty"` - Jobs map[string]*DataSourceJobs `json:"databricks_jobs,omitempty"` - Metastore map[string]*DataSourceMetastore `json:"databricks_metastore,omitempty"` - Metastores map[string]*DataSourceMetastores `json:"databricks_metastores,omitempty"` - MlflowModel map[string]*DataSourceMlflowModel `json:"databricks_mlflow_model,omitempty"` - MwsCredentials map[string]*DataSourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` - MwsWorkspaces map[string]*DataSourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` - NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"` - Notebook map[string]*DataSourceNotebook `json:"databricks_notebook,omitempty"` - NotebookPaths map[string]*DataSourceNotebookPaths `json:"databricks_notebook_paths,omitempty"` - Pipelines map[string]*DataSourcePipelines `json:"databricks_pipelines,omitempty"` - Schemas map[string]*DataSourceSchemas `json:"databricks_schemas,omitempty"` - ServicePrincipal map[string]*DataSourceServicePrincipal `json:"databricks_service_principal,omitempty"` - ServicePrincipals map[string]*DataSourceServicePrincipals `json:"databricks_service_principals,omitempty"` - Share map[string]*DataSourceShare `json:"databricks_share,omitempty"` - Shares map[string]*DataSourceShares `json:"databricks_shares,omitempty"` - SparkVersion map[string]*DataSourceSparkVersion `json:"databricks_spark_version,omitempty"` - SqlWarehouse map[string]*DataSourceSqlWarehouse `json:"databricks_sql_warehouse,omitempty"` - SqlWarehouses map[string]*DataSourceSqlWarehouses 
`json:"databricks_sql_warehouses,omitempty"` - Tables map[string]*DataSourceTables `json:"databricks_tables,omitempty"` - User map[string]*DataSourceUser `json:"databricks_user,omitempty"` - Views map[string]*DataSourceViews `json:"databricks_views,omitempty"` - Zones map[string]*DataSourceZones `json:"databricks_zones,omitempty"` + AwsAssumeRolePolicy map[string]any `json:"databricks_aws_assume_role_policy,omitempty"` + AwsBucketPolicy map[string]any `json:"databricks_aws_bucket_policy,omitempty"` + AwsCrossaccountPolicy map[string]any `json:"databricks_aws_crossaccount_policy,omitempty"` + AwsUnityCatalogAssumeRolePolicy map[string]any `json:"databricks_aws_unity_catalog_assume_role_policy,omitempty"` + AwsUnityCatalogPolicy map[string]any `json:"databricks_aws_unity_catalog_policy,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + Catalogs map[string]any `json:"databricks_catalogs,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + Clusters map[string]any `json:"databricks_clusters,omitempty"` + CurrentConfig map[string]any `json:"databricks_current_config,omitempty"` + CurrentMetastore map[string]any `json:"databricks_current_metastore,omitempty"` + CurrentUser map[string]any `json:"databricks_current_user,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DbfsFilePaths map[string]any `json:"databricks_dbfs_file_paths,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + ExternalLocations map[string]any `json:"databricks_external_locations,omitempty"` + Group map[string]any `json:"databricks_group,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfiles map[string]any `json:"databricks_instance_profiles,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + Jobs map[string]any `json:"databricks_jobs,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + Metastores map[string]any `json:"databricks_metastores,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + NodeType map[string]any `json:"databricks_node_type,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + NotebookPaths map[string]any `json:"databricks_notebook_paths,omitempty"` + Pipelines map[string]any `json:"databricks_pipelines,omitempty"` + Schemas map[string]any `json:"databricks_schemas,omitempty"` + ServicePrincipal map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipals map[string]any `json:"databricks_service_principals,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + Shares map[string]any `json:"databricks_shares,omitempty"` + SparkVersion map[string]any `json:"databricks_spark_version,omitempty"` + SqlWarehouse map[string]any `json:"databricks_sql_warehouse,omitempty"` + SqlWarehouses map[string]any `json:"databricks_sql_warehouses,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + StorageCredentials map[string]any `json:"databricks_storage_credentials,omitempty"` + Table 
map[string]any `json:"databricks_table,omitempty"` + Tables map[string]any `json:"databricks_tables,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + Views map[string]any `json:"databricks_views,omitempty"` + Volumes map[string]any `json:"databricks_volumes,omitempty"` + Zones map[string]any `json:"databricks_zones,omitempty"` } func NewDataSources() *DataSources { return &DataSources{ - AwsAssumeRolePolicy: make(map[string]*DataSourceAwsAssumeRolePolicy), - AwsBucketPolicy: make(map[string]*DataSourceAwsBucketPolicy), - AwsCrossaccountPolicy: make(map[string]*DataSourceAwsCrossaccountPolicy), - Catalogs: make(map[string]*DataSourceCatalogs), - Cluster: make(map[string]*DataSourceCluster), - ClusterPolicy: make(map[string]*DataSourceClusterPolicy), - Clusters: make(map[string]*DataSourceClusters), - CurrentConfig: make(map[string]*DataSourceCurrentConfig), - CurrentUser: make(map[string]*DataSourceCurrentUser), - DbfsFile: make(map[string]*DataSourceDbfsFile), - DbfsFilePaths: make(map[string]*DataSourceDbfsFilePaths), - Directory: make(map[string]*DataSourceDirectory), - Group: make(map[string]*DataSourceGroup), - InstancePool: make(map[string]*DataSourceInstancePool), - InstanceProfiles: make(map[string]*DataSourceInstanceProfiles), - Job: make(map[string]*DataSourceJob), - Jobs: make(map[string]*DataSourceJobs), - Metastore: make(map[string]*DataSourceMetastore), - Metastores: make(map[string]*DataSourceMetastores), - MlflowModel: make(map[string]*DataSourceMlflowModel), - MwsCredentials: make(map[string]*DataSourceMwsCredentials), - MwsWorkspaces: make(map[string]*DataSourceMwsWorkspaces), - NodeType: make(map[string]*DataSourceNodeType), - Notebook: make(map[string]*DataSourceNotebook), - NotebookPaths: make(map[string]*DataSourceNotebookPaths), - Pipelines: make(map[string]*DataSourcePipelines), - Schemas: make(map[string]*DataSourceSchemas), - ServicePrincipal: make(map[string]*DataSourceServicePrincipal), - ServicePrincipals: make(map[string]*DataSourceServicePrincipals), - Share: make(map[string]*DataSourceShare), - Shares: make(map[string]*DataSourceShares), - SparkVersion: make(map[string]*DataSourceSparkVersion), - SqlWarehouse: make(map[string]*DataSourceSqlWarehouse), - SqlWarehouses: make(map[string]*DataSourceSqlWarehouses), - Tables: make(map[string]*DataSourceTables), - User: make(map[string]*DataSourceUser), - Views: make(map[string]*DataSourceViews), - Zones: make(map[string]*DataSourceZones), + AwsAssumeRolePolicy: make(map[string]any), + AwsBucketPolicy: make(map[string]any), + AwsCrossaccountPolicy: make(map[string]any), + AwsUnityCatalogAssumeRolePolicy: make(map[string]any), + AwsUnityCatalogPolicy: make(map[string]any), + Catalog: make(map[string]any), + Catalogs: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + Clusters: make(map[string]any), + CurrentConfig: make(map[string]any), + CurrentMetastore: make(map[string]any), + CurrentUser: make(map[string]any), + DbfsFile: make(map[string]any), + DbfsFilePaths: make(map[string]any), + Directory: make(map[string]any), + ExternalLocation: make(map[string]any), + ExternalLocations: make(map[string]any), + Group: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfiles: make(map[string]any), + Job: make(map[string]any), + Jobs: make(map[string]any), + Metastore: make(map[string]any), + Metastores: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MwsCredentials: 
make(map[string]any), + MwsWorkspaces: make(map[string]any), + NodeType: make(map[string]any), + Notebook: make(map[string]any), + NotebookPaths: make(map[string]any), + Pipelines: make(map[string]any), + Schemas: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipals: make(map[string]any), + Share: make(map[string]any), + Shares: make(map[string]any), + SparkVersion: make(map[string]any), + SqlWarehouse: make(map[string]any), + SqlWarehouses: make(map[string]any), + StorageCredential: make(map[string]any), + StorageCredentials: make(map[string]any), + Table: make(map[string]any), + Tables: make(map[string]any), + User: make(map[string]any), + Views: make(map[string]any), + Volumes: make(map[string]any), + Zones: make(map[string]any), } } diff --git a/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go b/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go new file mode 100644 index 000000000..e95639de8 --- /dev/null +++ b/bundle/internal/tf/schema/resource_automatic_cluster_update_workspace_setting.go @@ -0,0 +1,39 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails struct { + ForcedForComplianceMode bool `json:"forced_for_compliance_mode,omitempty"` + UnavailableForDisabledEntitlement bool `json:"unavailable_for_disabled_entitlement,omitempty"` + UnavailableForNonEnterpriseTier bool `json:"unavailable_for_non_enterprise_tier,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime struct { + Hours int `json:"hours,omitempty"` + Minutes int `json:"minutes,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule struct { + DayOfWeek string `json:"day_of_week,omitempty"` + Frequency string `json:"frequency,omitempty"` + WindowStartTime *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedScheduleWindowStartTime `json:"window_start_time,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow struct { + WeekDayBasedSchedule *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindowWeekDayBasedSchedule `json:"week_day_based_schedule,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace struct { + CanToggle bool `json:"can_toggle,omitempty"` + Enabled bool `json:"enabled,omitempty"` + RestartEvenIfNoUpdatesAvailable bool `json:"restart_even_if_no_updates_available,omitempty"` + EnablementDetails *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceEnablementDetails `json:"enablement_details,omitempty"` + MaintenanceWindow *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspaceMaintenanceWindow `json:"maintenance_window,omitempty"` +} + +type ResourceAutomaticClusterUpdateWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + AutomaticClusterUpdateWorkspace *ResourceAutomaticClusterUpdateWorkspaceSettingAutomaticClusterUpdateWorkspace `json:"automatic_cluster_update_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_catalog.go 
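The DataSources change above switches every registry field from a typed pointer map to map[string]any, so callers now populate data-source blocks with untyped maps (or any JSON-marshalable value) rather than per-type structs. A self-contained sketch of the resulting "data" JSON; the trimmed dataSources type and the "this" block label are illustrative stand-ins, not the generated code.

package main

import (
	"encoding/json"
	"fmt"
)

// dataSources is a trimmed stand-in for the generated DataSources struct:
// values are `any`, so any marshalable shape can be registered per block label.
type dataSources struct {
	Volumes map[string]any `json:"databricks_volumes,omitempty"`
}

func main() {
	ds := dataSources{Volumes: map[string]any{}}
	// "this" is an example block label; real callers pick their own keys.
	ds.Volumes["this"] = map[string]any{
		"catalog_name": "main",
		"schema_name":  "default",
	}
	b, err := json.MarshalIndent(map[string]any{"data": ds}, "", "  ")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}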
b/bundle/internal/tf/schema/resource_catalog.go index a54f1c270..76c355288 100644 --- a/bundle/internal/tf/schema/resource_catalog.go +++ b/bundle/internal/tf/schema/resource_catalog.go @@ -3,17 +3,18 @@ package schema type ResourceCatalog struct { - Comment string `json:"comment,omitempty"` - ConnectionName string `json:"connection_name,omitempty"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - IsolationMode string `json:"isolation_mode,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Options map[string]string `json:"options,omitempty"` - Owner string `json:"owner,omitempty"` - Properties map[string]string `json:"properties,omitempty"` - ProviderName string `json:"provider_name,omitempty"` - ShareName string `json:"share_name,omitempty"` - StorageRoot string `json:"storage_root,omitempty"` + Comment string `json:"comment,omitempty"` + ConnectionName string `json:"connection_name,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + Id string `json:"id,omitempty"` + IsolationMode string `json:"isolation_mode,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + ProviderName string `json:"provider_name,omitempty"` + ShareName string `json:"share_name,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_cluster.go b/bundle/internal/tf/schema/resource_cluster.go index 1d5a5ef25..e4106d049 100644 --- a/bundle/internal/tf/schema/resource_cluster.go +++ b/bundle/internal/tf/schema/resource_cluster.go @@ -10,7 +10,9 @@ type ResourceClusterAutoscale struct { type ResourceClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -18,10 +20,16 @@ type ResourceClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceClusterClusterLogConfDbfs struct { @@ -74,7 +82,7 @@ type ResourceClusterGcpAttributes struct { } type ResourceClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsDbfs struct { @@ -82,11 
+90,11 @@ type ResourceClusterInitScriptsDbfs struct { } type ResourceClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsS3 struct { @@ -100,11 +108,11 @@ type ResourceClusterInitScriptsS3 struct { } type ResourceClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceClusterInitScripts struct { @@ -134,12 +142,13 @@ type ResourceClusterLibraryPypi struct { } type ResourceClusterLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceClusterLibraryCran `json:"cran,omitempty"` - Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceClusterLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceClusterLibraryPypi `json:"pypi,omitempty"` } type ResourceClusterWorkloadTypeClients struct { diff --git a/bundle/internal/tf/schema/resource_cluster_policy.go b/bundle/internal/tf/schema/resource_cluster_policy.go index 637fe6455..d8111fef2 100644 --- a/bundle/internal/tf/schema/resource_cluster_policy.go +++ b/bundle/internal/tf/schema/resource_cluster_policy.go @@ -19,12 +19,13 @@ type ResourceClusterPolicyLibrariesPypi struct { } type ResourceClusterPolicyLibraries struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` - Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` - Pypi *ResourceClusterPolicyLibrariesPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceClusterPolicyLibrariesCran `json:"cran,omitempty"` + Maven *ResourceClusterPolicyLibrariesMaven `json:"maven,omitempty"` + Pypi *ResourceClusterPolicyLibrariesPypi `json:"pypi,omitempty"` } type ResourceClusterPolicy struct { diff --git a/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go b/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go new file mode 100644 index 000000000..50815f753 --- /dev/null +++ b/bundle/internal/tf/schema/resource_compliance_security_profile_workspace_setting.go @@ -0,0 +1,15 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace struct { + ComplianceStandards []string `json:"compliance_standards,omitempty"` + IsEnabled bool `json:"is_enabled,omitempty"` +} + +type ResourceComplianceSecurityProfileWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + ComplianceSecurityProfileWorkspace *ResourceComplianceSecurityProfileWorkspaceSettingComplianceSecurityProfileWorkspace `json:"compliance_security_profile_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_directory.go b/bundle/internal/tf/schema/resource_directory.go index f418edded..ee7cf1607 100644 --- a/bundle/internal/tf/schema/resource_directory.go +++ b/bundle/internal/tf/schema/resource_directory.go @@ -7,4 +7,5 @@ type ResourceDirectory struct { Id string `json:"id,omitempty"` ObjectId int `json:"object_id,omitempty"` Path string `json:"path"` + WorkspacePath string `json:"workspace_path,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go b/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go new file mode 100644 index 000000000..2f552402a --- /dev/null +++ b/bundle/internal/tf/schema/resource_enhanced_security_monitoring_workspace_setting.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace struct { + IsEnabled bool `json:"is_enabled,omitempty"` +} + +type ResourceEnhancedSecurityMonitoringWorkspaceSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + EnhancedSecurityMonitoringWorkspace *ResourceEnhancedSecurityMonitoringWorkspaceSettingEnhancedSecurityMonitoringWorkspace `json:"enhanced_security_monitoring_workspace,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_file.go b/bundle/internal/tf/schema/resource_file.go new file mode 100644 index 000000000..40a307c9b --- /dev/null +++ b/bundle/internal/tf/schema/resource_file.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceFile struct { + ContentBase64 string `json:"content_base64,omitempty"` + FileSize int `json:"file_size,omitempty"` + Id string `json:"id,omitempty"` + Md5 string `json:"md5,omitempty"` + ModificationTime string `json:"modification_time,omitempty"` + Path string `json:"path"` + RemoteFileModified bool `json:"remote_file_modified,omitempty"` + Source string `json:"source,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grant.go b/bundle/internal/tf/schema/resource_grant.go new file mode 100644 index 000000000..d8569f304 --- /dev/null +++ b/bundle/internal/tf/schema/resource_grant.go @@ -0,0 +1,22 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
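The new ResourceFile above takes a required path plus either a source file or an inline content_base64 payload; the remaining fields are computed. A hedged sketch of preparing an inline payload; resourceFile mirrors only the writable fields, and the path value is purely illustrative.

package main

import (
	"encoding/base64"
	"encoding/json"
	"fmt"
)

// resourceFile mirrors the writable subset of the generated ResourceFile:
// "path" is required, and the body can be supplied inline as base64.
type resourceFile struct {
	Path          string `json:"path"`
	ContentBase64 string `json:"content_base64,omitempty"`
	Source        string `json:"source,omitempty"`
}

func main() {
	rf := resourceFile{
		Path:          "/Volumes/main/default/files/hello.txt", // example path
		ContentBase64: base64.StdEncoding.EncodeToString([]byte("hello\n")),
	}
	b, err := json.Marshal(rf)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}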
+ +package schema + +type ResourceGrant struct { + Catalog string `json:"catalog,omitempty"` + ExternalLocation string `json:"external_location,omitempty"` + ForeignConnection string `json:"foreign_connection,omitempty"` + Function string `json:"function,omitempty"` + Id string `json:"id,omitempty"` + Metastore string `json:"metastore,omitempty"` + Model string `json:"model,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + Principal string `json:"principal"` + Privileges []string `json:"privileges"` + Recipient string `json:"recipient,omitempty"` + Schema string `json:"schema,omitempty"` + Share string `json:"share,omitempty"` + StorageCredential string `json:"storage_credential,omitempty"` + Table string `json:"table,omitempty"` + Volume string `json:"volume,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_grants.go b/bundle/internal/tf/schema/resource_grants.go index 09b958f82..dd00152fb 100644 --- a/bundle/internal/tf/schema/resource_grants.go +++ b/bundle/internal/tf/schema/resource_grants.go @@ -13,14 +13,14 @@ type ResourceGrants struct { ForeignConnection string `json:"foreign_connection,omitempty"` Function string `json:"function,omitempty"` Id string `json:"id,omitempty"` - MaterializedView string `json:"materialized_view,omitempty"` Metastore string `json:"metastore,omitempty"` Model string `json:"model,omitempty"` + Pipeline string `json:"pipeline,omitempty"` + Recipient string `json:"recipient,omitempty"` Schema string `json:"schema,omitempty"` Share string `json:"share,omitempty"` StorageCredential string `json:"storage_credential,omitempty"` Table string `json:"table,omitempty"` - View string `json:"view,omitempty"` Volume string `json:"volume,omitempty"` Grant []ResourceGrantsGrant `json:"grant,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_instance_pool.go b/bundle/internal/tf/schema/resource_instance_pool.go index f524b3fce..0097a4913 100644 --- a/bundle/internal/tf/schema/resource_instance_pool.go +++ b/bundle/internal/tf/schema/resource_instance_pool.go @@ -27,6 +27,7 @@ type ResourceInstancePoolDiskSpec struct { type ResourceInstancePoolGcpAttributes struct { GcpAvailability string `json:"gcp_availability,omitempty"` LocalSsdCount int `json:"local_ssd_count,omitempty"` + ZoneId string `json:"zone_id,omitempty"` } type ResourceInstancePoolInstancePoolFleetAttributesFleetOnDemandOption struct { diff --git a/bundle/internal/tf/schema/resource_job.go b/bundle/internal/tf/schema/resource_job.go index 7884efd79..42b648b0f 100644 --- a/bundle/internal/tf/schema/resource_job.go +++ b/bundle/internal/tf/schema/resource_job.go @@ -2,15 +2,6 @@ package schema -type ResourceJobComputeSpec struct { - Kind string `json:"kind,omitempty"` -} - -type ResourceJobCompute struct { - ComputeKey string `json:"compute_key,omitempty"` - Spec *ResourceJobComputeSpec `json:"spec,omitempty"` -} - type ResourceJobContinuous struct { PauseStatus string `json:"pause_status,omitempty"` } @@ -21,6 +12,7 @@ type ResourceJobDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -34,9 +26,24 @@ type ResourceJobEmailNotifications struct { OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + 
OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobEnvironmentSpec struct { + Client string `json:"client"` + Dependencies []string `json:"dependencies,omitempty"` +} + +type ResourceJobEnvironment struct { + EnvironmentKey string `json:"environment_key"` + Spec *ResourceJobEnvironmentSpec `json:"spec,omitempty"` +} + +type ResourceJobGitSourceGitSnapshot struct { + UsedCommit string `json:"used_commit,omitempty"` +} + type ResourceJobGitSourceJobSource struct { DirtyState string `json:"dirty_state,omitempty"` ImportFromGitBranch string `json:"import_from_git_branch"` @@ -44,18 +51,19 @@ type ResourceJobGitSourceJobSource struct { } type ResourceJobGitSource struct { - Branch string `json:"branch,omitempty"` - Commit string `json:"commit,omitempty"` - Provider string `json:"provider,omitempty"` - Tag string `json:"tag,omitempty"` - Url string `json:"url"` - JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` + Branch string `json:"branch,omitempty"` + Commit string `json:"commit,omitempty"` + Provider string `json:"provider,omitempty"` + Tag string `json:"tag,omitempty"` + Url string `json:"url"` + GitSnapshot *ResourceJobGitSourceGitSnapshot `json:"git_snapshot,omitempty"` + JobSource *ResourceJobGitSourceJobSource `json:"job_source,omitempty"` } type ResourceJobHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type ResourceJobHealth struct { @@ -70,7 +78,9 @@ type ResourceJobJobClusterNewClusterAutoscale struct { type ResourceJobJobClusterNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -78,10 +88,16 @@ type ResourceJobJobClusterNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobJobClusterNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobJobClusterNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobJobClusterNewClusterClusterLogConfDbfs struct { @@ -134,7 +150,7 @@ type ResourceJobJobClusterNewClusterGcpAttributes struct { } type ResourceJobJobClusterNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsDbfs struct { @@ -142,11 +158,11 @@ type 
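The ResourceJobEnvironment and ResourceJobEnvironmentSpec types above take over from the removed compute blocks: each environment carries a required environment_key and a spec whose client is required while dependencies is an optional list. A small construction sketch under those tags; the trimmed types, the client value, and the dependency string are illustrative only.

package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed mirrors of the generated environment types: "environment_key"
// and "client" carry required tags, "dependencies" is optional.
type environmentSpec struct {
	Client       string   `json:"client"`
	Dependencies []string `json:"dependencies,omitempty"`
}

type jobEnvironment struct {
	EnvironmentKey string           `json:"environment_key"`
	Spec           *environmentSpec `json:"spec,omitempty"`
}

func main() {
	envs := []jobEnvironment{{
		EnvironmentKey: "default",
		Spec: &environmentSpec{
			Client:       "1", // example client version; accepted values come from the provider
			Dependencies: []string{"my-wheel==0.1.0"},
		},
	}}
	b, err := json.Marshal(envs)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
}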
ResourceJobJobClusterNewClusterInitScriptsDbfs struct { } type ResourceJobJobClusterNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsS3 struct { @@ -160,11 +176,11 @@ type ResourceJobJobClusterNewClusterInitScriptsS3 struct { } type ResourceJobJobClusterNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobJobClusterNewClusterInitScripts struct { @@ -177,6 +193,32 @@ type ResourceJobJobClusterNewClusterInitScripts struct { Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobJobClusterNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobJobClusterNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobJobClusterNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobJobClusterNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobJobClusterNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobJobClusterNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -188,7 +230,6 @@ type ResourceJobJobClusterNewClusterWorkloadType struct { type ResourceJobJobClusterNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -216,11 +257,12 @@ type ResourceJobJobClusterNewCluster struct { DockerImage *ResourceJobJobClusterNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobJobClusterNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobJobClusterNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobJobClusterNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobJobClusterNewClusterWorkloadType `json:"workload_type,omitempty"` } type ResourceJobJobCluster struct { - JobClusterKey string `json:"job_cluster_key,omitempty"` + JobClusterKey string `json:"job_cluster_key"` NewCluster *ResourceJobJobClusterNewCluster `json:"new_cluster,omitempty"` } @@ -241,12 +283,13 @@ type ResourceJobLibraryPypi struct { } type ResourceJobLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceJobLibraryCran `json:"cran,omitempty"` - Maven 
*ResourceJobLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceJobLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobLibraryPypi `json:"pypi,omitempty"` } type ResourceJobNewClusterAutoscale struct { @@ -257,7 +300,9 @@ type ResourceJobNewClusterAutoscale struct { type ResourceJobNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -265,10 +310,16 @@ type ResourceJobNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobNewClusterClusterLogConfDbfs struct { @@ -321,7 +372,7 @@ type ResourceJobNewClusterGcpAttributes struct { } type ResourceJobNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsDbfs struct { @@ -329,11 +380,11 @@ type ResourceJobNewClusterInitScriptsDbfs struct { } type ResourceJobNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsS3 struct { @@ -347,11 +398,11 @@ type ResourceJobNewClusterInitScriptsS3 struct { } type ResourceJobNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobNewClusterInitScripts struct { @@ -364,6 +415,32 @@ type ResourceJobNewClusterInitScripts struct { Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type 
ResourceJobNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -375,7 +452,6 @@ type ResourceJobNewClusterWorkloadType struct { type ResourceJobNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -403,6 +479,7 @@ type ResourceJobNewCluster struct { DockerImage *ResourceJobNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobNewClusterWorkloadType `json:"workload_type,omitempty"` } @@ -410,6 +487,7 @@ type ResourceJobNotebookTask struct { BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type ResourceJobNotificationSettings struct { @@ -418,8 +496,8 @@ type ResourceJobNotificationSettings struct { } type ResourceJobParameter struct { - Default string `json:"default,omitempty"` - Name string `json:"name,omitempty"` + Default string `json:"default"` + Name string `json:"name"` } type ResourceJobPipelineTask struct { @@ -471,9 +549,9 @@ type ResourceJobSparkSubmitTask struct { } type ResourceJobTaskConditionTask struct { - Left string `json:"left,omitempty"` - Op string `json:"op,omitempty"` - Right string `json:"right,omitempty"` + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` } type ResourceJobTaskDbtTask struct { @@ -482,6 +560,7 @@ type ResourceJobTaskDbtTask struct { ProfilesDirectory string `json:"profiles_directory,omitempty"` ProjectDirectory string `json:"project_directory,omitempty"` Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` WarehouseId string `json:"warehouse_id,omitempty"` } @@ -491,16 +570,439 @@ type ResourceJobTaskDependsOn struct { } type ResourceJobTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []string `json:"on_failure,omitempty"` OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []string `json:"on_success,omitempty"` } +type ResourceJobTaskForEachTaskTaskConditionTask struct { + Left string `json:"left"` + Op string `json:"op"` + Right string `json:"right"` +} + +type ResourceJobTaskForEachTaskTaskDbtTask struct { + Catalog string `json:"catalog,omitempty"` + Commands []string 
`json:"commands"` + ProfilesDirectory string `json:"profiles_directory,omitempty"` + ProjectDirectory string `json:"project_directory,omitempty"` + Schema string `json:"schema,omitempty"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskDependsOn struct { + Outcome string `json:"outcome,omitempty"` + TaskKey string `json:"task_key"` +} + +type ResourceJobTaskForEachTaskTaskEmailNotifications struct { + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` + OnDurationWarningThresholdExceeded []string `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []string `json:"on_failure,omitempty"` + OnStart []string `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []string `json:"on_streaming_backlog_exceeded,omitempty"` + OnSuccess []string `json:"on_success,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskHealthRules struct { + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` +} + +type ResourceJobTaskForEachTaskTaskHealth struct { + Rules []ResourceJobTaskForEachTaskTaskHealthRules `json:"rules,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskForEachTaskTaskLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskForEachTaskTaskLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskForEachTaskTaskLibraryPypi `json:"pypi,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAutoscale struct { + MaxWorkers int `json:"max_workers,omitempty"` + MinWorkers int `json:"min_workers,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes struct { + Availability string `json:"availability,omitempty"` + EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` + EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` + EbsVolumeType string `json:"ebs_volume_type,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes struct { + Availability string `json:"availability,omitempty"` + FirstOnDemand int `json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` +} + +type 
ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterLogConf struct { + Dbfs *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfDbfs `json:"dbfs,omitempty"` + S3 *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConfS3 `json:"s3,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo struct { + MountOptions string `json:"mount_options,omitempty"` + ServerAddress string `json:"server_address"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfo struct { + LocalMountDirPath string `json:"local_mount_dir_path"` + RemoteMountDirPath string `json:"remote_mount_dir_path,omitempty"` + NetworkFilesystemInfo *ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfoNetworkFilesystemInfo `json:"network_filesystem_info,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterDockerImageBasicAuth struct { + Password string `json:"password"` + Username string `json:"username"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterDockerImage struct { + Url string `json:"url"` + BasicAuth *ResourceJobTaskForEachTaskTaskNewClusterDockerImageBasicAuth `json:"basic_auth,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes struct { + Availability string `json:"availability,omitempty"` + BootDiskSize int `json:"boot_disk_size,omitempty"` + GoogleServiceAccount string `json:"google_service_account,omitempty"` + LocalSsdCount int `json:"local_ssd_count,omitempty"` + UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` + ZoneId string `json:"zone_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsAbfss struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsDbfs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsFile struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsGcs struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsS3 struct { + CannedAcl string `json:"canned_acl,omitempty"` + Destination string `json:"destination"` + EnableEncryption bool `json:"enable_encryption,omitempty"` + EncryptionType string `json:"encryption_type,omitempty"` + Endpoint string `json:"endpoint,omitempty"` + KmsKey string `json:"kms_key,omitempty"` + Region string `json:"region,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsVolumes struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace struct { + Destination string `json:"destination"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterInitScripts struct { + Abfss *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"` + Dbfs *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"` + File 
*ResourceJobTaskForEachTaskTaskNewClusterInitScriptsFile `json:"file,omitempty"` + Gcs *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"` + S3 *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsS3 `json:"s3,omitempty"` + Volumes *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsVolumes `json:"volumes,omitempty"` + Workspace *ResourceJobTaskForEachTaskTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskForEachTaskTaskNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskForEachTaskTaskNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskForEachTaskTaskNewClusterLibraryPypi `json:"pypi,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients struct { + Jobs bool `json:"jobs,omitempty"` + Notebooks bool `json:"notebooks,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewClusterWorkloadType struct { + Clients *ResourceJobTaskForEachTaskTaskNewClusterWorkloadTypeClients `json:"clients,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNewCluster struct { + ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` + ClusterId string `json:"cluster_id,omitempty"` + ClusterName string `json:"cluster_name,omitempty"` + CustomTags map[string]string `json:"custom_tags,omitempty"` + DataSecurityMode string `json:"data_security_mode,omitempty"` + DriverInstancePoolId string `json:"driver_instance_pool_id,omitempty"` + DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` + EnableElasticDisk bool `json:"enable_elastic_disk,omitempty"` + EnableLocalDiskEncryption bool `json:"enable_local_disk_encryption,omitempty"` + IdempotencyToken string `json:"idempotency_token,omitempty"` + InstancePoolId string `json:"instance_pool_id,omitempty"` + NodeTypeId string `json:"node_type_id,omitempty"` + NumWorkers int `json:"num_workers,omitempty"` + PolicyId string `json:"policy_id,omitempty"` + RuntimeEngine string `json:"runtime_engine,omitempty"` + SingleUserName string `json:"single_user_name,omitempty"` + SparkConf map[string]string `json:"spark_conf,omitempty"` + SparkEnvVars map[string]string `json:"spark_env_vars,omitempty"` + SparkVersion string `json:"spark_version"` + SshPublicKeys []string `json:"ssh_public_keys,omitempty"` + Autoscale *ResourceJobTaskForEachTaskTaskNewClusterAutoscale `json:"autoscale,omitempty"` + AwsAttributes *ResourceJobTaskForEachTaskTaskNewClusterAwsAttributes `json:"aws_attributes,omitempty"` + AzureAttributes *ResourceJobTaskForEachTaskTaskNewClusterAzureAttributes `json:"azure_attributes,omitempty"` + ClusterLogConf *ResourceJobTaskForEachTaskTaskNewClusterClusterLogConf `json:"cluster_log_conf,omitempty"` + ClusterMountInfo []ResourceJobTaskForEachTaskTaskNewClusterClusterMountInfo 
`json:"cluster_mount_info,omitempty"` + DockerImage *ResourceJobTaskForEachTaskTaskNewClusterDockerImage `json:"docker_image,omitempty"` + GcpAttributes *ResourceJobTaskForEachTaskTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` + InitScripts []ResourceJobTaskForEachTaskTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobTaskForEachTaskTaskNewClusterLibrary `json:"library,omitempty"` + WorkloadType *ResourceJobTaskForEachTaskTaskNewClusterWorkloadType `json:"workload_type,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNotebookTask struct { + BaseParameters map[string]string `json:"base_parameters,omitempty"` + NotebookPath string `json:"notebook_path"` + Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskNotificationSettings struct { + AlertOnLastAttempt bool `json:"alert_on_last_attempt,omitempty"` + NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"` + NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskPipelineTask struct { + FullRefresh bool `json:"full_refresh,omitempty"` + PipelineId string `json:"pipeline_id"` +} + +type ResourceJobTaskForEachTaskTaskPythonWheelTask struct { + EntryPoint string `json:"entry_point,omitempty"` + NamedParameters map[string]string `json:"named_parameters,omitempty"` + PackageName string `json:"package_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams struct { + FullRefresh bool `json:"full_refresh,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskRunJobTask struct { + DbtCommands []string `json:"dbt_commands,omitempty"` + JarParams []string `json:"jar_params,omitempty"` + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + NotebookParams map[string]string `json:"notebook_params,omitempty"` + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + PythonParams []string `json:"python_params,omitempty"` + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + SqlParams map[string]string `json:"sql_params,omitempty"` + PipelineParams *ResourceJobTaskForEachTaskTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkJarTask struct { + JarUri string `json:"jar_uri,omitempty"` + MainClassName string `json:"main_class_name,omitempty"` + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkPythonTask struct { + Parameters []string `json:"parameters,omitempty"` + PythonFile string `json:"python_file"` + Source string `json:"source,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSparkSubmitTask struct { + Parameters []string `json:"parameters,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskAlertSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskAlert struct { + AlertId string `json:"alert_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []ResourceJobTaskForEachTaskTaskSqlTaskAlertSubscriptions `json:"subscriptions,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskDashboardSubscriptions struct { + DestinationId string `json:"destination_id,omitempty"` + UserName string `json:"user_name,omitempty"` +} + +type 
ResourceJobTaskForEachTaskTaskSqlTaskDashboard struct { + CustomSubject string `json:"custom_subject,omitempty"` + DashboardId string `json:"dashboard_id"` + PauseSubscriptions bool `json:"pause_subscriptions,omitempty"` + Subscriptions []ResourceJobTaskForEachTaskTaskSqlTaskDashboardSubscriptions `json:"subscriptions,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskFile struct { + Path string `json:"path"` + Source string `json:"source,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskSqlTaskQuery struct { + QueryId string `json:"query_id"` +} + +type ResourceJobTaskForEachTaskTaskSqlTask struct { + Parameters map[string]string `json:"parameters,omitempty"` + WarehouseId string `json:"warehouse_id"` + Alert *ResourceJobTaskForEachTaskTaskSqlTaskAlert `json:"alert,omitempty"` + Dashboard *ResourceJobTaskForEachTaskTaskSqlTaskDashboard `json:"dashboard,omitempty"` + File *ResourceJobTaskForEachTaskTaskSqlTaskFile `json:"file,omitempty"` + Query *ResourceJobTaskForEachTaskTaskSqlTaskQuery `json:"query,omitempty"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { + Id string `json:"id"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure struct { + Id string `json:"id"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart struct { + Id string `json:"id"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess struct { + Id string `json:"id"` +} + +type ResourceJobTaskForEachTaskTaskWebhookNotifications struct { + OnDurationWarningThresholdExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` + OnFailure []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` + OnStart []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` + OnSuccess []ResourceJobTaskForEachTaskTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` +} + +type ResourceJobTaskForEachTaskTask struct { + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskForEachTaskTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskForEachTaskTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskForEachTaskTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskForEachTaskTaskEmailNotifications `json:"email_notifications,omitempty"` + Health *ResourceJobTaskForEachTaskTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskForEachTaskTaskLibrary `json:"library,omitempty"` + NewCluster 
*ResourceJobTaskForEachTaskTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskForEachTaskTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskForEachTaskTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskForEachTaskTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskForEachTaskTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskForEachTaskTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskForEachTaskTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskForEachTaskTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskForEachTaskTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskForEachTaskTaskSqlTask `json:"sql_task,omitempty"` + WebhookNotifications *ResourceJobTaskForEachTaskTaskWebhookNotifications `json:"webhook_notifications,omitempty"` +} + +type ResourceJobTaskForEachTask struct { + Concurrency int `json:"concurrency,omitempty"` + Inputs string `json:"inputs"` + Task *ResourceJobTaskForEachTaskTask `json:"task,omitempty"` +} + type ResourceJobTaskHealthRules struct { - Metric string `json:"metric,omitempty"` - Op string `json:"op,omitempty"` - Value int `json:"value,omitempty"` + Metric string `json:"metric"` + Op string `json:"op"` + Value int `json:"value"` } type ResourceJobTaskHealth struct { @@ -524,12 +1026,13 @@ type ResourceJobTaskLibraryPypi struct { } type ResourceJobTaskLibrary struct { - Egg string `json:"egg,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"` - Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceJobTaskLibraryPypi `json:"pypi,omitempty"` + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskLibraryPypi `json:"pypi,omitempty"` } type ResourceJobTaskNewClusterAutoscale struct { @@ -540,7 +1043,9 @@ type ResourceJobTaskNewClusterAutoscale struct { type ResourceJobTaskNewClusterAwsAttributes struct { Availability string `json:"availability,omitempty"` EbsVolumeCount int `json:"ebs_volume_count,omitempty"` + EbsVolumeIops int `json:"ebs_volume_iops,omitempty"` EbsVolumeSize int `json:"ebs_volume_size,omitempty"` + EbsVolumeThroughput int `json:"ebs_volume_throughput,omitempty"` EbsVolumeType string `json:"ebs_volume_type,omitempty"` FirstOnDemand int `json:"first_on_demand,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -548,10 +1053,16 @@ type ResourceJobTaskNewClusterAwsAttributes struct { ZoneId string `json:"zone_id,omitempty"` } +type ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo struct { + LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` +} + type ResourceJobTaskNewClusterAzureAttributes struct { - Availability string `json:"availability,omitempty"` - FirstOnDemand int `json:"first_on_demand,omitempty"` - SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + Availability string `json:"availability,omitempty"` + FirstOnDemand int 
`json:"first_on_demand,omitempty"` + SpotBidMaxPrice int `json:"spot_bid_max_price,omitempty"` + LogAnalyticsInfo *ResourceJobTaskNewClusterAzureAttributesLogAnalyticsInfo `json:"log_analytics_info,omitempty"` } type ResourceJobTaskNewClusterClusterLogConfDbfs struct { @@ -604,7 +1115,7 @@ type ResourceJobTaskNewClusterGcpAttributes struct { } type ResourceJobTaskNewClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsDbfs struct { @@ -612,11 +1123,11 @@ type ResourceJobTaskNewClusterInitScriptsDbfs struct { } type ResourceJobTaskNewClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsS3 struct { @@ -630,11 +1141,11 @@ type ResourceJobTaskNewClusterInitScriptsS3 struct { } type ResourceJobTaskNewClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourceJobTaskNewClusterInitScripts struct { @@ -647,6 +1158,32 @@ type ResourceJobTaskNewClusterInitScripts struct { Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"` } +type ResourceJobTaskNewClusterLibraryCran struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibraryMaven struct { + Coordinates string `json:"coordinates"` + Exclusions []string `json:"exclusions,omitempty"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibraryPypi struct { + Package string `json:"package"` + Repo string `json:"repo,omitempty"` +} + +type ResourceJobTaskNewClusterLibrary struct { + Egg string `json:"egg,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceJobTaskNewClusterLibraryCran `json:"cran,omitempty"` + Maven *ResourceJobTaskNewClusterLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceJobTaskNewClusterLibraryPypi `json:"pypi,omitempty"` +} + type ResourceJobTaskNewClusterWorkloadTypeClients struct { Jobs bool `json:"jobs,omitempty"` Notebooks bool `json:"notebooks,omitempty"` @@ -658,7 +1195,6 @@ type ResourceJobTaskNewClusterWorkloadType struct { type ResourceJobTaskNewCluster struct { ApplyPolicyDefaultValues bool `json:"apply_policy_default_values,omitempty"` - AutoterminationMinutes int `json:"autotermination_minutes,omitempty"` ClusterId string `json:"cluster_id,omitempty"` ClusterName string `json:"cluster_name,omitempty"` CustomTags map[string]string `json:"custom_tags,omitempty"` @@ -686,6 +1222,7 @@ type ResourceJobTaskNewCluster struct { DockerImage *ResourceJobTaskNewClusterDockerImage `json:"docker_image,omitempty"` GcpAttributes *ResourceJobTaskNewClusterGcpAttributes `json:"gcp_attributes,omitempty"` InitScripts []ResourceJobTaskNewClusterInitScripts `json:"init_scripts,omitempty"` + Library []ResourceJobTaskNewClusterLibrary `json:"library,omitempty"` WorkloadType *ResourceJobTaskNewClusterWorkloadType `json:"workload_type,omitempty"` } @@ -693,6 +1230,7 @@ type ResourceJobTaskNotebookTask struct { 
BaseParameters map[string]string `json:"base_parameters,omitempty"` NotebookPath string `json:"notebook_path"` Source string `json:"source,omitempty"` + WarehouseId string `json:"warehouse_id,omitempty"` } type ResourceJobTaskNotificationSettings struct { @@ -713,9 +1251,21 @@ type ResourceJobTaskPythonWheelTask struct { Parameters []string `json:"parameters,omitempty"` } +type ResourceJobTaskRunJobTaskPipelineParams struct { + FullRefresh bool `json:"full_refresh,omitempty"` +} + type ResourceJobTaskRunJobTask struct { - JobId int `json:"job_id"` - JobParameters map[string]string `json:"job_parameters,omitempty"` + DbtCommands []string `json:"dbt_commands,omitempty"` + JarParams []string `json:"jar_params,omitempty"` + JobId int `json:"job_id"` + JobParameters map[string]string `json:"job_parameters,omitempty"` + NotebookParams map[string]string `json:"notebook_params,omitempty"` + PythonNamedParams map[string]string `json:"python_named_params,omitempty"` + PythonParams []string `json:"python_params,omitempty"` + SparkSubmitParams []string `json:"spark_submit_params,omitempty"` + SqlParams map[string]string `json:"sql_params,omitempty"` + PipelineParams *ResourceJobTaskRunJobTaskPipelineParams `json:"pipeline_params,omitempty"` } type ResourceJobTaskSparkJarTask struct { @@ -758,7 +1308,8 @@ type ResourceJobTaskSqlTaskDashboard struct { } type ResourceJobTaskSqlTaskFile struct { - Path string `json:"path"` + Path string `json:"path"` + Source string `json:"source,omitempty"` } type ResourceJobTaskSqlTaskQuery struct { @@ -767,7 +1318,7 @@ type ResourceJobTaskSqlTaskQuery struct { type ResourceJobTaskSqlTask struct { Parameters map[string]string `json:"parameters,omitempty"` - WarehouseId string `json:"warehouse_id,omitempty"` + WarehouseId string `json:"warehouse_id"` Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"` Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"` File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"` @@ -775,56 +1326,63 @@ type ResourceJobTaskSqlTask struct { } type ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` +} + +type ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` } type ResourceJobTaskWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobTaskWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobTaskWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobTaskWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobTaskWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobTaskWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobTaskWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } type ResourceJobTask struct { - ComputeKey string `json:"compute_key,omitempty"` - Description string `json:"description,omitempty"` - ExistingClusterId string `json:"existing_cluster_id,omitempty"` - JobClusterKey string `json:"job_cluster_key,omitempty"` - MaxRetries int 
`json:"max_retries,omitempty"` - MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` - RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` - RunIf string `json:"run_if,omitempty"` - TaskKey string `json:"task_key,omitempty"` - TimeoutSeconds int `json:"timeout_seconds,omitempty"` - ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` - DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` - DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` - EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` - Health *ResourceJobTaskHealth `json:"health,omitempty"` - Library []ResourceJobTaskLibrary `json:"library,omitempty"` - NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` - NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` - NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` - PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` - PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` - RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` - SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` - SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` - SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` - SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` - WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` + Description string `json:"description,omitempty"` + DisableAutoOptimization bool `json:"disable_auto_optimization,omitempty"` + EnvironmentKey string `json:"environment_key,omitempty"` + ExistingClusterId string `json:"existing_cluster_id,omitempty"` + JobClusterKey string `json:"job_cluster_key,omitempty"` + MaxRetries int `json:"max_retries,omitempty"` + MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"` + RetryOnTimeout bool `json:"retry_on_timeout,omitempty"` + RunIf string `json:"run_if,omitempty"` + TaskKey string `json:"task_key"` + TimeoutSeconds int `json:"timeout_seconds,omitempty"` + ConditionTask *ResourceJobTaskConditionTask `json:"condition_task,omitempty"` + DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"` + DependsOn []ResourceJobTaskDependsOn `json:"depends_on,omitempty"` + EmailNotifications *ResourceJobTaskEmailNotifications `json:"email_notifications,omitempty"` + ForEachTask *ResourceJobTaskForEachTask `json:"for_each_task,omitempty"` + Health *ResourceJobTaskHealth `json:"health,omitempty"` + Library []ResourceJobTaskLibrary `json:"library,omitempty"` + NewCluster *ResourceJobTaskNewCluster `json:"new_cluster,omitempty"` + NotebookTask *ResourceJobTaskNotebookTask `json:"notebook_task,omitempty"` + NotificationSettings *ResourceJobTaskNotificationSettings `json:"notification_settings,omitempty"` + PipelineTask *ResourceJobTaskPipelineTask `json:"pipeline_task,omitempty"` + PythonWheelTask *ResourceJobTaskPythonWheelTask `json:"python_wheel_task,omitempty"` + RunJobTask *ResourceJobTaskRunJobTask `json:"run_job_task,omitempty"` + SparkJarTask *ResourceJobTaskSparkJarTask `json:"spark_jar_task,omitempty"` + SparkPythonTask *ResourceJobTaskSparkPythonTask `json:"spark_python_task,omitempty"` + SparkSubmitTask *ResourceJobTaskSparkSubmitTask `json:"spark_submit_task,omitempty"` + SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"` 
+ WebhookNotifications *ResourceJobTaskWebhookNotifications `json:"webhook_notifications,omitempty"` } type ResourceJobTriggerFileArrival struct { @@ -833,31 +1391,58 @@ type ResourceJobTriggerFileArrival struct { WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` } +type ResourceJobTriggerPeriodic struct { + Interval int `json:"interval"` + Unit string `json:"unit"` +} + +type ResourceJobTriggerTable struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names,omitempty"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + +type ResourceJobTriggerTableUpdate struct { + Condition string `json:"condition,omitempty"` + MinTimeBetweenTriggersSeconds int `json:"min_time_between_triggers_seconds,omitempty"` + TableNames []string `json:"table_names"` + WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` +} + type ResourceJobTrigger struct { PauseStatus string `json:"pause_status,omitempty"` FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"` + Periodic *ResourceJobTriggerPeriodic `json:"periodic,omitempty"` + Table *ResourceJobTriggerTable `json:"table,omitempty"` + TableUpdate *ResourceJobTriggerTableUpdate `json:"table_update,omitempty"` } type ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnFailure struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnStart struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` +} + +type ResourceJobWebhookNotificationsOnStreamingBacklogExceeded struct { + Id string `json:"id"` } type ResourceJobWebhookNotificationsOnSuccess struct { - Id string `json:"id,omitempty"` + Id string `json:"id"` } type ResourceJobWebhookNotifications struct { OnDurationWarningThresholdExceeded []ResourceJobWebhookNotificationsOnDurationWarningThresholdExceeded `json:"on_duration_warning_threshold_exceeded,omitempty"` OnFailure []ResourceJobWebhookNotificationsOnFailure `json:"on_failure,omitempty"` OnStart []ResourceJobWebhookNotificationsOnStart `json:"on_start,omitempty"` + OnStreamingBacklogExceeded []ResourceJobWebhookNotificationsOnStreamingBacklogExceeded `json:"on_streaming_backlog_exceeded,omitempty"` OnSuccess []ResourceJobWebhookNotificationsOnSuccess `json:"on_success,omitempty"` } @@ -877,11 +1462,11 @@ type ResourceJob struct { Tags map[string]string `json:"tags,omitempty"` TimeoutSeconds int `json:"timeout_seconds,omitempty"` Url string `json:"url,omitempty"` - Compute []ResourceJobCompute `json:"compute,omitempty"` Continuous *ResourceJobContinuous `json:"continuous,omitempty"` DbtTask *ResourceJobDbtTask `json:"dbt_task,omitempty"` Deployment *ResourceJobDeployment `json:"deployment,omitempty"` EmailNotifications *ResourceJobEmailNotifications `json:"email_notifications,omitempty"` + Environment []ResourceJobEnvironment `json:"environment,omitempty"` GitSource *ResourceJobGitSource `json:"git_source,omitempty"` Health *ResourceJobHealth `json:"health,omitempty"` JobCluster []ResourceJobJobCluster `json:"job_cluster,omitempty"` diff --git a/bundle/internal/tf/schema/resource_lakehouse_monitor.go b/bundle/internal/tf/schema/resource_lakehouse_monitor.go new file mode 100644 index 000000000..69dbdd047 --- /dev/null +++ 
b/bundle/internal/tf/schema/resource_lakehouse_monitor.go @@ -0,0 +1,76 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceLakehouseMonitorCustomMetrics struct { + Definition string `json:"definition"` + InputColumns []string `json:"input_columns"` + Name string `json:"name"` + OutputDataType string `json:"output_data_type"` + Type string `json:"type"` +} + +type ResourceLakehouseMonitorDataClassificationConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type ResourceLakehouseMonitorInferenceLog struct { + Granularities []string `json:"granularities"` + LabelCol string `json:"label_col,omitempty"` + ModelIdCol string `json:"model_id_col"` + PredictionCol string `json:"prediction_col"` + PredictionProbaCol string `json:"prediction_proba_col,omitempty"` + ProblemType string `json:"problem_type"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceLakehouseMonitorNotificationsOnFailure struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceLakehouseMonitorNotifications struct { + OnFailure *ResourceLakehouseMonitorNotificationsOnFailure `json:"on_failure,omitempty"` + OnNewClassificationTagDetected *ResourceLakehouseMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"` +} + +type ResourceLakehouseMonitorSchedule struct { + PauseStatus string `json:"pause_status,omitempty"` + QuartzCronExpression string `json:"quartz_cron_expression"` + TimezoneId string `json:"timezone_id"` +} + +type ResourceLakehouseMonitorSnapshot struct { +} + +type ResourceLakehouseMonitorTimeSeries struct { + Granularities []string `json:"granularities"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceLakehouseMonitor struct { + AssetsDir string `json:"assets_dir"` + BaselineTableName string `json:"baseline_table_name,omitempty"` + DashboardId string `json:"dashboard_id,omitempty"` + DriftMetricsTableName string `json:"drift_metrics_table_name,omitempty"` + Id string `json:"id,omitempty"` + LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"` + MonitorVersion string `json:"monitor_version,omitempty"` + OutputSchemaName string `json:"output_schema_name"` + ProfileMetricsTableName string `json:"profile_metrics_table_name,omitempty"` + SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"` + SlicingExprs []string `json:"slicing_exprs,omitempty"` + Status string `json:"status,omitempty"` + TableName string `json:"table_name"` + WarehouseId string `json:"warehouse_id,omitempty"` + CustomMetrics []ResourceLakehouseMonitorCustomMetrics `json:"custom_metrics,omitempty"` + DataClassificationConfig *ResourceLakehouseMonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + InferenceLog *ResourceLakehouseMonitorInferenceLog `json:"inference_log,omitempty"` + Notifications *ResourceLakehouseMonitorNotifications `json:"notifications,omitempty"` + Schedule *ResourceLakehouseMonitorSchedule `json:"schedule,omitempty"` + Snapshot *ResourceLakehouseMonitorSnapshot `json:"snapshot,omitempty"` + TimeSeries *ResourceLakehouseMonitorTimeSeries `json:"time_series,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_library.go b/bundle/internal/tf/schema/resource_library.go index e2e83fb4f..385d992df 100644 --- 
a/bundle/internal/tf/schema/resource_library.go +++ b/bundle/internal/tf/schema/resource_library.go @@ -19,12 +19,13 @@ type ResourceLibraryPypi struct { } type ResourceLibrary struct { - ClusterId string `json:"cluster_id"` - Egg string `json:"egg,omitempty"` - Id string `json:"id,omitempty"` - Jar string `json:"jar,omitempty"` - Whl string `json:"whl,omitempty"` - Cran *ResourceLibraryCran `json:"cran,omitempty"` - Maven *ResourceLibraryMaven `json:"maven,omitempty"` - Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` + ClusterId string `json:"cluster_id"` + Egg string `json:"egg,omitempty"` + Id string `json:"id,omitempty"` + Jar string `json:"jar,omitempty"` + Requirements string `json:"requirements,omitempty"` + Whl string `json:"whl,omitempty"` + Cran *ResourceLibraryCran `json:"cran,omitempty"` + Maven *ResourceLibraryMaven `json:"maven,omitempty"` + Pypi *ResourceLibraryPypi `json:"pypi,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_metastore_data_access.go b/bundle/internal/tf/schema/resource_metastore_data_access.go index ec1395f71..155730055 100644 --- a/bundle/internal/tf/schema/resource_metastore_data_access.go +++ b/bundle/internal/tf/schema/resource_metastore_data_access.go @@ -34,12 +34,14 @@ type ResourceMetastoreDataAccessGcpServiceAccountKey struct { type ResourceMetastoreDataAccess struct { Comment string `json:"comment,omitempty"` ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` Id string `json:"id,omitempty"` IsDefault bool `json:"is_default,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` AwsIamRole *ResourceMetastoreDataAccessAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceMetastoreDataAccessAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceMetastoreDataAccessAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_model_serving.go b/bundle/internal/tf/schema/resource_model_serving.go index b0cabbe5a..f5ffbbe5e 100644 --- a/bundle/internal/tf/schema/resource_model_serving.go +++ b/bundle/internal/tf/schema/resource_model_serving.go @@ -2,6 +2,80 @@ package schema +type ResourceModelServingConfigAutoCaptureConfig struct { + CatalogName string `json:"catalog_name,omitempty"` + Enabled bool `json:"enabled,omitempty"` + SchemaName string `json:"schema_name,omitempty"` + TableNamePrefix string `json:"table_name_prefix,omitempty"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig struct { + Ai21LabsApiKey string `json:"ai21labs_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig struct { + AwsAccessKeyId string `json:"aws_access_key_id"` + AwsRegion string `json:"aws_region"` + AwsSecretAccessKey string `json:"aws_secret_access_key"` + BedrockProvider string `json:"bedrock_provider"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig struct { + AnthropicApiKey string `json:"anthropic_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelCohereConfig struct { + CohereApiKey string `json:"cohere_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig struct { + DatabricksApiToken string `json:"databricks_api_token"` + DatabricksWorkspaceUrl 
string `json:"databricks_workspace_url"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig struct { + MicrosoftEntraClientId string `json:"microsoft_entra_client_id,omitempty"` + MicrosoftEntraClientSecret string `json:"microsoft_entra_client_secret,omitempty"` + MicrosoftEntraTenantId string `json:"microsoft_entra_tenant_id,omitempty"` + OpenaiApiBase string `json:"openai_api_base,omitempty"` + OpenaiApiKey string `json:"openai_api_key,omitempty"` + OpenaiApiType string `json:"openai_api_type,omitempty"` + OpenaiApiVersion string `json:"openai_api_version,omitempty"` + OpenaiDeploymentName string `json:"openai_deployment_name,omitempty"` + OpenaiOrganization string `json:"openai_organization,omitempty"` +} + +type ResourceModelServingConfigServedEntitiesExternalModelPalmConfig struct { + PalmApiKey string `json:"palm_api_key"` +} + +type ResourceModelServingConfigServedEntitiesExternalModel struct { + Name string `json:"name"` + Provider string `json:"provider"` + Task string `json:"task"` + Ai21LabsConfig *ResourceModelServingConfigServedEntitiesExternalModelAi21LabsConfig `json:"ai21labs_config,omitempty"` + AmazonBedrockConfig *ResourceModelServingConfigServedEntitiesExternalModelAmazonBedrockConfig `json:"amazon_bedrock_config,omitempty"` + AnthropicConfig *ResourceModelServingConfigServedEntitiesExternalModelAnthropicConfig `json:"anthropic_config,omitempty"` + CohereConfig *ResourceModelServingConfigServedEntitiesExternalModelCohereConfig `json:"cohere_config,omitempty"` + DatabricksModelServingConfig *ResourceModelServingConfigServedEntitiesExternalModelDatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` + OpenaiConfig *ResourceModelServingConfigServedEntitiesExternalModelOpenaiConfig `json:"openai_config,omitempty"` + PalmConfig *ResourceModelServingConfigServedEntitiesExternalModelPalmConfig `json:"palm_config,omitempty"` +} + +type ResourceModelServingConfigServedEntities struct { + EntityName string `json:"entity_name,omitempty"` + EntityVersion string `json:"entity_version,omitempty"` + EnvironmentVars map[string]string `json:"environment_vars,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` + MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` + Name string `json:"name,omitempty"` + ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` + WorkloadSize string `json:"workload_size,omitempty"` + WorkloadType string `json:"workload_type,omitempty"` + ExternalModel *ResourceModelServingConfigServedEntitiesExternalModel `json:"external_model,omitempty"` +} + type ResourceModelServingConfigServedModels struct { EnvironmentVars map[string]string `json:"environment_vars,omitempty"` InstanceProfileArn string `json:"instance_profile_arn,omitempty"` @@ -23,8 +97,16 @@ type ResourceModelServingConfigTrafficConfig struct { } type ResourceModelServingConfig struct { - ServedModels []ResourceModelServingConfigServedModels `json:"served_models,omitempty"` - TrafficConfig *ResourceModelServingConfigTrafficConfig `json:"traffic_config,omitempty"` + AutoCaptureConfig *ResourceModelServingConfigAutoCaptureConfig `json:"auto_capture_config,omitempty"` + ServedEntities []ResourceModelServingConfigServedEntities `json:"served_entities,omitempty"` + ServedModels []ResourceModelServingConfigServedModels `json:"served_models,omitempty"` + TrafficConfig *ResourceModelServingConfigTrafficConfig 
`json:"traffic_config,omitempty"` +} + +type ResourceModelServingRateLimits struct { + Calls int `json:"calls"` + Key string `json:"key,omitempty"` + RenewalPeriod string `json:"renewal_period"` } type ResourceModelServingTags struct { @@ -33,9 +115,11 @@ type ResourceModelServingTags struct { } type ResourceModelServing struct { - Id string `json:"id,omitempty"` - Name string `json:"name"` - ServingEndpointId string `json:"serving_endpoint_id,omitempty"` - Config *ResourceModelServingConfig `json:"config,omitempty"` - Tags []ResourceModelServingTags `json:"tags,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + RouteOptimized bool `json:"route_optimized,omitempty"` + ServingEndpointId string `json:"serving_endpoint_id,omitempty"` + Config *ResourceModelServingConfig `json:"config,omitempty"` + RateLimits []ResourceModelServingRateLimits `json:"rate_limits,omitempty"` + Tags []ResourceModelServingTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_ncc_binding.go b/bundle/internal/tf/schema/resource_mws_ncc_binding.go new file mode 100644 index 000000000..8beafb6f5 --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_ncc_binding.go @@ -0,0 +1,9 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceMwsNccBinding struct { + Id string `json:"id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id"` + WorkspaceId int `json:"workspace_id"` +} diff --git a/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go b/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go new file mode 100644 index 000000000..2acb374bc --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_ncc_private_endpoint_rule.go @@ -0,0 +1,17 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceMwsNccPrivateEndpointRule struct { + ConnectionState string `json:"connection_state,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Deactivated bool `json:"deactivated,omitempty"` + DeactivatedAt int `json:"deactivated_at,omitempty"` + EndpointName string `json:"endpoint_name,omitempty"` + GroupId string `json:"group_id"` + Id string `json:"id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id"` + ResourceId string `json:"resource_id"` + RuleId string `json:"rule_id,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go b/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go new file mode 100644 index 000000000..64ebab224 --- /dev/null +++ b/bundle/internal/tf/schema/resource_mws_network_connectivity_config.go @@ -0,0 +1,51 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. 
+ +package schema + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule struct { + CidrBlocks []string `json:"cidr_blocks,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule struct { + Subnets []string `json:"subnets,omitempty"` + TargetRegion string `json:"target_region,omitempty"` + TargetServices []string `json:"target_services,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules struct { + AwsStableIpRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAwsStableIpRule `json:"aws_stable_ip_rule,omitempty"` + AzureServiceEndpointRule *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRulesAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules struct { + ConnectionState string `json:"connection_state,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Deactivated bool `json:"deactivated,omitempty"` + DeactivatedAt int `json:"deactivated_at,omitempty"` + EndpointName string `json:"endpoint_name,omitempty"` + GroupId string `json:"group_id,omitempty"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + ResourceId string `json:"resource_id,omitempty"` + RuleId string `json:"rule_id,omitempty"` + UpdatedTime int `json:"updated_time,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules struct { + AzurePrivateEndpointRules []ResourceMwsNetworkConnectivityConfigEgressConfigTargetRulesAzurePrivateEndpointRules `json:"azure_private_endpoint_rules,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfigEgressConfig struct { + DefaultRules *ResourceMwsNetworkConnectivityConfigEgressConfigDefaultRules `json:"default_rules,omitempty"` + TargetRules *ResourceMwsNetworkConnectivityConfigEgressConfigTargetRules `json:"target_rules,omitempty"` +} + +type ResourceMwsNetworkConnectivityConfig struct { + AccountId string `json:"account_id,omitempty"` + CreationTime int `json:"creation_time,omitempty"` + Id string `json:"id,omitempty"` + Name string `json:"name"` + NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` + Region string `json:"region"` + UpdatedTime int `json:"updated_time,omitempty"` + EgressConfig *ResourceMwsNetworkConnectivityConfigEgressConfig `json:"egress_config,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_mws_private_access_settings.go b/bundle/internal/tf/schema/resource_mws_private_access_settings.go index 2c9bdfeca..c7c40aabf 100644 --- a/bundle/internal/tf/schema/resource_mws_private_access_settings.go +++ b/bundle/internal/tf/schema/resource_mws_private_access_settings.go @@ -11,5 +11,4 @@ type ResourceMwsPrivateAccessSettings struct { PrivateAccessSettingsName string `json:"private_access_settings_name"` PublicAccessEnabled bool `json:"public_access_enabled,omitempty"` Region string `json:"region"` - Status string `json:"status,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_mws_workspaces.go b/bundle/internal/tf/schema/resource_mws_workspaces.go index 83d0ab909..6c053cb84 100644 --- a/bundle/internal/tf/schema/resource_mws_workspaces.go +++ b/bundle/internal/tf/schema/resource_mws_workspaces.go @@ -40,8 +40,10 @@ type ResourceMwsWorkspaces struct { Cloud string `json:"cloud,omitempty"` CreationTime int `json:"creation_time,omitempty"` CredentialsId string `json:"credentials_id,omitempty"` + 
CustomTags map[string]string `json:"custom_tags,omitempty"` CustomerManagedKeyId string `json:"customer_managed_key_id,omitempty"` DeploymentName string `json:"deployment_name,omitempty"` + GcpWorkspaceSa string `json:"gcp_workspace_sa,omitempty"` Id string `json:"id,omitempty"` IsNoPublicIpEnabled bool `json:"is_no_public_ip_enabled,omitempty"` Location string `json:"location,omitempty"` diff --git a/bundle/internal/tf/schema/resource_online_table.go b/bundle/internal/tf/schema/resource_online_table.go new file mode 100644 index 000000000..de671eade --- /dev/null +++ b/bundle/internal/tf/schema/resource_online_table.go @@ -0,0 +1,27 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceOnlineTableSpecRunContinuously struct { +} + +type ResourceOnlineTableSpecRunTriggered struct { +} + +type ResourceOnlineTableSpec struct { + PerformFullCopy bool `json:"perform_full_copy,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + PrimaryKeyColumns []string `json:"primary_key_columns,omitempty"` + SourceTableFullName string `json:"source_table_full_name,omitempty"` + TimeseriesKey string `json:"timeseries_key,omitempty"` + RunContinuously *ResourceOnlineTableSpecRunContinuously `json:"run_continuously,omitempty"` + RunTriggered *ResourceOnlineTableSpecRunTriggered `json:"run_triggered,omitempty"` +} + +type ResourceOnlineTable struct { + Id string `json:"id,omitempty"` + Name string `json:"name"` + Status []any `json:"status,omitempty"` + TableServingUrl string `json:"table_serving_url,omitempty"` + Spec *ResourceOnlineTableSpec `json:"spec,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_pipeline.go b/bundle/internal/tf/schema/resource_pipeline.go index 72354f621..20c25c1e2 100644 --- a/bundle/internal/tf/schema/resource_pipeline.go +++ b/bundle/internal/tf/schema/resource_pipeline.go @@ -52,7 +52,7 @@ type ResourcePipelineClusterGcpAttributes struct { } type ResourcePipelineClusterInitScriptsAbfss struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsDbfs struct { @@ -60,11 +60,11 @@ type ResourcePipelineClusterInitScriptsDbfs struct { } type ResourcePipelineClusterInitScriptsFile struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsGcs struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsS3 struct { @@ -78,11 +78,11 @@ type ResourcePipelineClusterInitScriptsS3 struct { } type ResourcePipelineClusterInitScriptsVolumes struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScriptsWorkspace struct { - Destination string `json:"destination,omitempty"` + Destination string `json:"destination"` } type ResourcePipelineClusterInitScripts struct { @@ -117,6 +117,11 @@ type ResourcePipelineCluster struct { InitScripts []ResourcePipelineClusterInitScripts `json:"init_scripts,omitempty"` } +type ResourcePipelineDeployment struct { + Kind string `json:"kind,omitempty"` + MetadataFilePath string `json:"metadata_file_path,omitempty"` +} + type ResourcePipelineFilters struct { Exclude []string `json:"exclude,omitempty"` Include []string `json:"include,omitempty"` @@ -165,6 +170,7 @@ type ResourcePipeline struct { Target string `json:"target,omitempty"` Url string `json:"url,omitempty"` 
Cluster []ResourcePipelineCluster `json:"cluster,omitempty"` + Deployment *ResourcePipelineDeployment `json:"deployment,omitempty"` Filters *ResourcePipelineFilters `json:"filters,omitempty"` Library []ResourcePipelineLibrary `json:"library,omitempty"` Notification []ResourcePipelineNotification `json:"notification,omitempty"` diff --git a/bundle/internal/tf/schema/resource_quality_monitor.go b/bundle/internal/tf/schema/resource_quality_monitor.go new file mode 100644 index 000000000..0fc2abd66 --- /dev/null +++ b/bundle/internal/tf/schema/resource_quality_monitor.go @@ -0,0 +1,76 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceQualityMonitorCustomMetrics struct { + Definition string `json:"definition"` + InputColumns []string `json:"input_columns"` + Name string `json:"name"` + OutputDataType string `json:"output_data_type"` + Type string `json:"type"` +} + +type ResourceQualityMonitorDataClassificationConfig struct { + Enabled bool `json:"enabled,omitempty"` +} + +type ResourceQualityMonitorInferenceLog struct { + Granularities []string `json:"granularities"` + LabelCol string `json:"label_col,omitempty"` + ModelIdCol string `json:"model_id_col"` + PredictionCol string `json:"prediction_col"` + PredictionProbaCol string `json:"prediction_proba_col,omitempty"` + ProblemType string `json:"problem_type"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceQualityMonitorNotificationsOnFailure struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceQualityMonitorNotificationsOnNewClassificationTagDetected struct { + EmailAddresses []string `json:"email_addresses,omitempty"` +} + +type ResourceQualityMonitorNotifications struct { + OnFailure *ResourceQualityMonitorNotificationsOnFailure `json:"on_failure,omitempty"` + OnNewClassificationTagDetected *ResourceQualityMonitorNotificationsOnNewClassificationTagDetected `json:"on_new_classification_tag_detected,omitempty"` +} + +type ResourceQualityMonitorSchedule struct { + PauseStatus string `json:"pause_status,omitempty"` + QuartzCronExpression string `json:"quartz_cron_expression"` + TimezoneId string `json:"timezone_id"` +} + +type ResourceQualityMonitorSnapshot struct { +} + +type ResourceQualityMonitorTimeSeries struct { + Granularities []string `json:"granularities"` + TimestampCol string `json:"timestamp_col"` +} + +type ResourceQualityMonitor struct { + AssetsDir string `json:"assets_dir"` + BaselineTableName string `json:"baseline_table_name,omitempty"` + DashboardId string `json:"dashboard_id,omitempty"` + DriftMetricsTableName string `json:"drift_metrics_table_name,omitempty"` + Id string `json:"id,omitempty"` + LatestMonitorFailureMsg string `json:"latest_monitor_failure_msg,omitempty"` + MonitorVersion string `json:"monitor_version,omitempty"` + OutputSchemaName string `json:"output_schema_name"` + ProfileMetricsTableName string `json:"profile_metrics_table_name,omitempty"` + SkipBuiltinDashboard bool `json:"skip_builtin_dashboard,omitempty"` + SlicingExprs []string `json:"slicing_exprs,omitempty"` + Status string `json:"status,omitempty"` + TableName string `json:"table_name"` + WarehouseId string `json:"warehouse_id,omitempty"` + CustomMetrics []ResourceQualityMonitorCustomMetrics `json:"custom_metrics,omitempty"` + DataClassificationConfig *ResourceQualityMonitorDataClassificationConfig `json:"data_classification_config,omitempty"` + InferenceLog *ResourceQualityMonitorInferenceLog `json:"inference_log,omitempty"` + Notifications 
*ResourceQualityMonitorNotifications `json:"notifications,omitempty"` + Schedule *ResourceQualityMonitorSchedule `json:"schedule,omitempty"` + Snapshot *ResourceQualityMonitorSnapshot `json:"snapshot,omitempty"` + TimeSeries *ResourceQualityMonitorTimeSeries `json:"time_series,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_recipient.go b/bundle/internal/tf/schema/resource_recipient.go index 47d6de37c..91de4df76 100644 --- a/bundle/internal/tf/schema/resource_recipient.go +++ b/bundle/internal/tf/schema/resource_recipient.go @@ -3,7 +3,11 @@ package schema type ResourceRecipientIpAccessList struct { - AllowedIpAddresses []string `json:"allowed_ip_addresses"` + AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"` +} + +type ResourceRecipientPropertiesKvpairs struct { + Properties map[string]string `json:"properties"` } type ResourceRecipientTokens struct { @@ -17,13 +21,23 @@ type ResourceRecipientTokens struct { } type ResourceRecipient struct { - AuthenticationType string `json:"authentication_type"` - Comment string `json:"comment,omitempty"` - DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` - Id string `json:"id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - SharingCode string `json:"sharing_code,omitempty"` - IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` - Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` + Activated bool `json:"activated,omitempty"` + ActivationUrl string `json:"activation_url,omitempty"` + AuthenticationType string `json:"authentication_type"` + Cloud string `json:"cloud,omitempty"` + Comment string `json:"comment,omitempty"` + CreatedAt int `json:"created_at,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Region string `json:"region,omitempty"` + SharingCode string `json:"sharing_code,omitempty"` + UpdatedAt int `json:"updated_at,omitempty"` + UpdatedBy string `json:"updated_by,omitempty"` + IpAccessList *ResourceRecipientIpAccessList `json:"ip_access_list,omitempty"` + PropertiesKvpairs *ResourceRecipientPropertiesKvpairs `json:"properties_kvpairs,omitempty"` + Tokens []ResourceRecipientTokens `json:"tokens,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_registered_model.go b/bundle/internal/tf/schema/resource_registered_model.go index e4f1c088b..f19b68275 100644 --- a/bundle/internal/tf/schema/resource_registered_model.go +++ b/bundle/internal/tf/schema/resource_registered_model.go @@ -7,6 +7,7 @@ type ResourceRegisteredModel struct { Comment string `json:"comment,omitempty"` Id string `json:"id,omitempty"` Name string `json:"name"` + Owner string `json:"owner,omitempty"` SchemaName string `json:"schema_name"` StorageLocation string `json:"storage_location,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_repo.go b/bundle/internal/tf/schema/resource_repo.go index 583ab097a..6f2945072 100644 --- a/bundle/internal/tf/schema/resource_repo.go +++ b/bundle/internal/tf/schema/resource_repo.go @@ -14,5 +14,6 @@ type ResourceRepo struct { Path string `json:"path,omitempty"` Tag string `json:"tag,omitempty"` Url string `json:"url"` + WorkspacePath string `json:"workspace_path,omitempty"` SparseCheckout *ResourceRepoSparseCheckout 
`json:"sparse_checkout,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go b/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go new file mode 100644 index 000000000..975d501b9 --- /dev/null +++ b/bundle/internal/tf/schema/resource_restrict_workspace_admins_setting.go @@ -0,0 +1,14 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceRestrictWorkspaceAdminsSettingRestrictWorkspaceAdmins struct { + Status string `json:"status"` +} + +type ResourceRestrictWorkspaceAdminsSetting struct { + Etag string `json:"etag,omitempty"` + Id string `json:"id,omitempty"` + SettingName string `json:"setting_name,omitempty"` + RestrictWorkspaceAdmins *ResourceRestrictWorkspaceAdminsSettingRestrictWorkspaceAdmins `json:"restrict_workspace_admins,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_schema.go b/bundle/internal/tf/schema/resource_schema.go index f1949b07f..3ac8d813d 100644 --- a/bundle/internal/tf/schema/resource_schema.go +++ b/bundle/internal/tf/schema/resource_schema.go @@ -3,13 +3,14 @@ package schema type ResourceSchema struct { - CatalogName string `json:"catalog_name"` - Comment string `json:"comment,omitempty"` - ForceDestroy bool `json:"force_destroy,omitempty"` - Id string `json:"id,omitempty"` - MetastoreId string `json:"metastore_id,omitempty"` - Name string `json:"name"` - Owner string `json:"owner,omitempty"` - Properties map[string]string `json:"properties,omitempty"` - StorageRoot string `json:"storage_root,omitempty"` + CatalogName string `json:"catalog_name"` + Comment string `json:"comment,omitempty"` + EnablePredictiveOptimization string `json:"enable_predictive_optimization,omitempty"` + ForceDestroy bool `json:"force_destroy,omitempty"` + Id string `json:"id,omitempty"` + MetastoreId string `json:"metastore_id,omitempty"` + Name string `json:"name"` + Owner string `json:"owner,omitempty"` + Properties map[string]string `json:"properties,omitempty"` + StorageRoot string `json:"storage_root,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_endpoint.go b/bundle/internal/tf/schema/resource_sql_endpoint.go index c48261b96..b49a7cff5 100644 --- a/bundle/internal/tf/schema/resource_sql_endpoint.go +++ b/bundle/internal/tf/schema/resource_sql_endpoint.go @@ -3,15 +3,8 @@ package schema type ResourceSqlEndpointChannel struct { - Name string `json:"name,omitempty"` -} - -type ResourceSqlEndpointOdbcParams struct { - Host string `json:"host,omitempty"` - Hostname string `json:"hostname,omitempty"` - Path string `json:"path"` - Port int `json:"port"` - Protocol string `json:"protocol"` + DbsqlVersion string `json:"dbsql_version,omitempty"` + Name string `json:"name,omitempty"` } type ResourceSqlEndpointTagsCustomTags struct { @@ -24,22 +17,25 @@ type ResourceSqlEndpointTags struct { } type ResourceSqlEndpoint struct { - AutoStopMins int `json:"auto_stop_mins,omitempty"` - ClusterSize string `json:"cluster_size"` - DataSourceId string `json:"data_source_id,omitempty"` - EnablePhoton bool `json:"enable_photon,omitempty"` - EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` - Id string `json:"id,omitempty"` - InstanceProfileArn string `json:"instance_profile_arn,omitempty"` - JdbcUrl string `json:"jdbc_url,omitempty"` - MaxNumClusters int `json:"max_num_clusters,omitempty"` - MinNumClusters int `json:"min_num_clusters,omitempty"` - Name string `json:"name"` - NumClusters int `json:"num_clusters,omitempty"` - 
SpotInstancePolicy string `json:"spot_instance_policy,omitempty"` - State string `json:"state,omitempty"` - WarehouseType string `json:"warehouse_type,omitempty"` - Channel *ResourceSqlEndpointChannel `json:"channel,omitempty"` - OdbcParams *ResourceSqlEndpointOdbcParams `json:"odbc_params,omitempty"` - Tags *ResourceSqlEndpointTags `json:"tags,omitempty"` + AutoStopMins int `json:"auto_stop_mins,omitempty"` + ClusterSize string `json:"cluster_size"` + CreatorName string `json:"creator_name,omitempty"` + DataSourceId string `json:"data_source_id,omitempty"` + EnablePhoton bool `json:"enable_photon,omitempty"` + EnableServerlessCompute bool `json:"enable_serverless_compute,omitempty"` + Health []any `json:"health,omitempty"` + Id string `json:"id,omitempty"` + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` + JdbcUrl string `json:"jdbc_url,omitempty"` + MaxNumClusters int `json:"max_num_clusters,omitempty"` + MinNumClusters int `json:"min_num_clusters,omitempty"` + Name string `json:"name"` + NumActiveSessions int `json:"num_active_sessions,omitempty"` + NumClusters int `json:"num_clusters,omitempty"` + OdbcParams []any `json:"odbc_params,omitempty"` + SpotInstancePolicy string `json:"spot_instance_policy,omitempty"` + State string `json:"state,omitempty"` + WarehouseType string `json:"warehouse_type,omitempty"` + Channel *ResourceSqlEndpointChannel `json:"channel,omitempty"` + Tags *ResourceSqlEndpointTags `json:"tags,omitempty"` } diff --git a/bundle/internal/tf/schema/resource_sql_table.go b/bundle/internal/tf/schema/resource_sql_table.go index 97a8977bc..51fb3bc0d 100644 --- a/bundle/internal/tf/schema/resource_sql_table.go +++ b/bundle/internal/tf/schema/resource_sql_table.go @@ -18,6 +18,7 @@ type ResourceSqlTable struct { Id string `json:"id,omitempty"` Name string `json:"name"` Options map[string]string `json:"options,omitempty"` + Owner string `json:"owner,omitempty"` Partitions []string `json:"partitions,omitempty"` Properties map[string]string `json:"properties,omitempty"` SchemaName string `json:"schema_name"` diff --git a/bundle/internal/tf/schema/resource_storage_credential.go b/bundle/internal/tf/schema/resource_storage_credential.go index 1f103023d..b565a5c78 100644 --- a/bundle/internal/tf/schema/resource_storage_credential.go +++ b/bundle/internal/tf/schema/resource_storage_credential.go @@ -34,11 +34,14 @@ type ResourceStorageCredentialGcpServiceAccountKey struct { type ResourceStorageCredential struct { Comment string `json:"comment,omitempty"` ForceDestroy bool `json:"force_destroy,omitempty"` + ForceUpdate bool `json:"force_update,omitempty"` Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Name string `json:"name"` Owner string `json:"owner,omitempty"` ReadOnly bool `json:"read_only,omitempty"` + SkipValidation bool `json:"skip_validation,omitempty"` + StorageCredentialId string `json:"storage_credential_id,omitempty"` AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"` AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"` AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"` diff --git a/bundle/internal/tf/schema/resource_system_schema.go b/bundle/internal/tf/schema/resource_system_schema.go index 09a86103a..fe5b128d6 100644 --- a/bundle/internal/tf/schema/resource_system_schema.go +++ b/bundle/internal/tf/schema/resource_system_schema.go @@ -3,6 +3,7 @@ package schema type 
ResourceSystemSchema struct { + FullName string `json:"full_name,omitempty"` Id string `json:"id,omitempty"` MetastoreId string `json:"metastore_id,omitempty"` Schema string `json:"schema,omitempty"` diff --git a/bundle/internal/tf/schema/resource_vector_search_endpoint.go b/bundle/internal/tf/schema/resource_vector_search_endpoint.go new file mode 100644 index 000000000..392c78611 --- /dev/null +++ b/bundle/internal/tf/schema/resource_vector_search_endpoint.go @@ -0,0 +1,16 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceVectorSearchEndpoint struct { + CreationTimestamp int `json:"creation_timestamp,omitempty"` + Creator string `json:"creator,omitempty"` + EndpointId string `json:"endpoint_id,omitempty"` + EndpointStatus []any `json:"endpoint_status,omitempty"` + EndpointType string `json:"endpoint_type"` + Id string `json:"id,omitempty"` + LastUpdatedTimestamp int `json:"last_updated_timestamp,omitempty"` + LastUpdatedUser string `json:"last_updated_user,omitempty"` + Name string `json:"name"` + NumIndexes int `json:"num_indexes,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_vector_search_index.go b/bundle/internal/tf/schema/resource_vector_search_index.go new file mode 100644 index 000000000..2ce51576d --- /dev/null +++ b/bundle/internal/tf/schema/resource_vector_search_index.go @@ -0,0 +1,50 @@ +// Generated from Databricks Terraform provider schema. DO NOT EDIT. + +package schema + +type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns struct { + EmbeddingModelEndpointName string `json:"embedding_model_endpoint_name,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns struct { + EmbeddingDimension int `json:"embedding_dimension,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDeltaSyncIndexSpec struct { + EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` + PipelineId string `json:"pipeline_id,omitempty"` + PipelineType string `json:"pipeline_type,omitempty"` + SourceTable string `json:"source_table,omitempty"` + EmbeddingSourceColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` + EmbeddingVectorColumns []ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingSourceColumns struct { + EmbeddingModelEndpointName string `json:"embedding_model_endpoint_name,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingVectorColumns struct { + EmbeddingDimension int `json:"embedding_dimension,omitempty"` + Name string `json:"name,omitempty"` +} + +type ResourceVectorSearchIndexDirectAccessIndexSpec struct { + SchemaJson string `json:"schema_json,omitempty"` + EmbeddingSourceColumns []ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingSourceColumns `json:"embedding_source_columns,omitempty"` + EmbeddingVectorColumns []ResourceVectorSearchIndexDirectAccessIndexSpecEmbeddingVectorColumns `json:"embedding_vector_columns,omitempty"` +} + +type ResourceVectorSearchIndex struct { + Creator string `json:"creator,omitempty"` + EndpointName string `json:"endpoint_name"` + Id string `json:"id,omitempty"` + IndexType string `json:"index_type"` + Name string `json:"name"` + PrimaryKey string `json:"primary_key"` + Status []any 
`json:"status,omitempty"` + DeltaSyncIndexSpec *ResourceVectorSearchIndexDeltaSyncIndexSpec `json:"delta_sync_index_spec,omitempty"` + DirectAccessIndexSpec *ResourceVectorSearchIndexDirectAccessIndexSpec `json:"direct_access_index_spec,omitempty"` +} diff --git a/bundle/internal/tf/schema/resource_volume.go b/bundle/internal/tf/schema/resource_volume.go index 77d499a69..4a82d8e85 100644 --- a/bundle/internal/tf/schema/resource_volume.go +++ b/bundle/internal/tf/schema/resource_volume.go @@ -10,5 +10,6 @@ type ResourceVolume struct { Owner string `json:"owner,omitempty"` SchemaName string `json:"schema_name"` StorageLocation string `json:"storage_location,omitempty"` + VolumePath string `json:"volume_path,omitempty"` VolumeType string `json:"volume_type"` } diff --git a/bundle/internal/tf/schema/resources.go b/bundle/internal/tf/schema/resources.go index 4519a5686..79d71a65f 100644 --- a/bundle/internal/tf/schema/resources.go +++ b/bundle/internal/tf/schema/resources.go @@ -3,171 +3,199 @@ package schema type Resources struct { - AccessControlRuleSet map[string]*ResourceAccessControlRuleSet `json:"databricks_access_control_rule_set,omitempty"` - ArtifactAllowlist map[string]*ResourceArtifactAllowlist `json:"databricks_artifact_allowlist,omitempty"` - AwsS3Mount map[string]*ResourceAwsS3Mount `json:"databricks_aws_s3_mount,omitempty"` - AzureAdlsGen1Mount map[string]*ResourceAzureAdlsGen1Mount `json:"databricks_azure_adls_gen1_mount,omitempty"` - AzureAdlsGen2Mount map[string]*ResourceAzureAdlsGen2Mount `json:"databricks_azure_adls_gen2_mount,omitempty"` - AzureBlobMount map[string]*ResourceAzureBlobMount `json:"databricks_azure_blob_mount,omitempty"` - Catalog map[string]*ResourceCatalog `json:"databricks_catalog,omitempty"` - CatalogWorkspaceBinding map[string]*ResourceCatalogWorkspaceBinding `json:"databricks_catalog_workspace_binding,omitempty"` - Cluster map[string]*ResourceCluster `json:"databricks_cluster,omitempty"` - ClusterPolicy map[string]*ResourceClusterPolicy `json:"databricks_cluster_policy,omitempty"` - Connection map[string]*ResourceConnection `json:"databricks_connection,omitempty"` - DbfsFile map[string]*ResourceDbfsFile `json:"databricks_dbfs_file,omitempty"` - DefaultNamespaceSetting map[string]*ResourceDefaultNamespaceSetting `json:"databricks_default_namespace_setting,omitempty"` - Directory map[string]*ResourceDirectory `json:"databricks_directory,omitempty"` - Entitlements map[string]*ResourceEntitlements `json:"databricks_entitlements,omitempty"` - ExternalLocation map[string]*ResourceExternalLocation `json:"databricks_external_location,omitempty"` - GitCredential map[string]*ResourceGitCredential `json:"databricks_git_credential,omitempty"` - GlobalInitScript map[string]*ResourceGlobalInitScript `json:"databricks_global_init_script,omitempty"` - Grants map[string]*ResourceGrants `json:"databricks_grants,omitempty"` - Group map[string]*ResourceGroup `json:"databricks_group,omitempty"` - GroupInstanceProfile map[string]*ResourceGroupInstanceProfile `json:"databricks_group_instance_profile,omitempty"` - GroupMember map[string]*ResourceGroupMember `json:"databricks_group_member,omitempty"` - GroupRole map[string]*ResourceGroupRole `json:"databricks_group_role,omitempty"` - InstancePool map[string]*ResourceInstancePool `json:"databricks_instance_pool,omitempty"` - InstanceProfile map[string]*ResourceInstanceProfile `json:"databricks_instance_profile,omitempty"` - IpAccessList map[string]*ResourceIpAccessList `json:"databricks_ip_access_list,omitempty"` - Job 
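In the same spirit, a sketch of composing the new vector search index schema for a delta-sync index; all literal values here are made up, only the struct and field names come from the file above.

// Assumes: import "github.com/databricks/cli/bundle/internal/tf/schema"
index := schema.ResourceVectorSearchIndex{
	Name:         "main.default.docs_index", // made-up identifiers
	EndpointName: "docs-endpoint",
	IndexType:    "DELTA_SYNC",
	PrimaryKey:   "id",
	DeltaSyncIndexSpec: &schema.ResourceVectorSearchIndexDeltaSyncIndexSpec{
		SourceTable:  "main.default.docs",
		PipelineType: "TRIGGERED",
		EmbeddingSourceColumns: []schema.ResourceVectorSearchIndexDeltaSyncIndexSpecEmbeddingSourceColumns{
			{Name: "text", EmbeddingModelEndpointName: "embedding-endpoint"},
		},
	},
}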
map[string]*ResourceJob `json:"databricks_job,omitempty"` - Library map[string]*ResourceLibrary `json:"databricks_library,omitempty"` - Metastore map[string]*ResourceMetastore `json:"databricks_metastore,omitempty"` - MetastoreAssignment map[string]*ResourceMetastoreAssignment `json:"databricks_metastore_assignment,omitempty"` - MetastoreDataAccess map[string]*ResourceMetastoreDataAccess `json:"databricks_metastore_data_access,omitempty"` - MlflowExperiment map[string]*ResourceMlflowExperiment `json:"databricks_mlflow_experiment,omitempty"` - MlflowModel map[string]*ResourceMlflowModel `json:"databricks_mlflow_model,omitempty"` - MlflowWebhook map[string]*ResourceMlflowWebhook `json:"databricks_mlflow_webhook,omitempty"` - ModelServing map[string]*ResourceModelServing `json:"databricks_model_serving,omitempty"` - Mount map[string]*ResourceMount `json:"databricks_mount,omitempty"` - MwsCredentials map[string]*ResourceMwsCredentials `json:"databricks_mws_credentials,omitempty"` - MwsCustomerManagedKeys map[string]*ResourceMwsCustomerManagedKeys `json:"databricks_mws_customer_managed_keys,omitempty"` - MwsLogDelivery map[string]*ResourceMwsLogDelivery `json:"databricks_mws_log_delivery,omitempty"` - MwsNetworks map[string]*ResourceMwsNetworks `json:"databricks_mws_networks,omitempty"` - MwsPermissionAssignment map[string]*ResourceMwsPermissionAssignment `json:"databricks_mws_permission_assignment,omitempty"` - MwsPrivateAccessSettings map[string]*ResourceMwsPrivateAccessSettings `json:"databricks_mws_private_access_settings,omitempty"` - MwsStorageConfigurations map[string]*ResourceMwsStorageConfigurations `json:"databricks_mws_storage_configurations,omitempty"` - MwsVpcEndpoint map[string]*ResourceMwsVpcEndpoint `json:"databricks_mws_vpc_endpoint,omitempty"` - MwsWorkspaces map[string]*ResourceMwsWorkspaces `json:"databricks_mws_workspaces,omitempty"` - Notebook map[string]*ResourceNotebook `json:"databricks_notebook,omitempty"` - OboToken map[string]*ResourceOboToken `json:"databricks_obo_token,omitempty"` - PermissionAssignment map[string]*ResourcePermissionAssignment `json:"databricks_permission_assignment,omitempty"` - Permissions map[string]*ResourcePermissions `json:"databricks_permissions,omitempty"` - Pipeline map[string]*ResourcePipeline `json:"databricks_pipeline,omitempty"` - Provider map[string]*ResourceProvider `json:"databricks_provider,omitempty"` - Recipient map[string]*ResourceRecipient `json:"databricks_recipient,omitempty"` - RegisteredModel map[string]*ResourceRegisteredModel `json:"databricks_registered_model,omitempty"` - Repo map[string]*ResourceRepo `json:"databricks_repo,omitempty"` - Schema map[string]*ResourceSchema `json:"databricks_schema,omitempty"` - Secret map[string]*ResourceSecret `json:"databricks_secret,omitempty"` - SecretAcl map[string]*ResourceSecretAcl `json:"databricks_secret_acl,omitempty"` - SecretScope map[string]*ResourceSecretScope `json:"databricks_secret_scope,omitempty"` - ServicePrincipal map[string]*ResourceServicePrincipal `json:"databricks_service_principal,omitempty"` - ServicePrincipalRole map[string]*ResourceServicePrincipalRole `json:"databricks_service_principal_role,omitempty"` - ServicePrincipalSecret map[string]*ResourceServicePrincipalSecret `json:"databricks_service_principal_secret,omitempty"` - Share map[string]*ResourceShare `json:"databricks_share,omitempty"` - SqlAlert map[string]*ResourceSqlAlert `json:"databricks_sql_alert,omitempty"` - SqlDashboard map[string]*ResourceSqlDashboard 
`json:"databricks_sql_dashboard,omitempty"` - SqlEndpoint map[string]*ResourceSqlEndpoint `json:"databricks_sql_endpoint,omitempty"` - SqlGlobalConfig map[string]*ResourceSqlGlobalConfig `json:"databricks_sql_global_config,omitempty"` - SqlPermissions map[string]*ResourceSqlPermissions `json:"databricks_sql_permissions,omitempty"` - SqlQuery map[string]*ResourceSqlQuery `json:"databricks_sql_query,omitempty"` - SqlTable map[string]*ResourceSqlTable `json:"databricks_sql_table,omitempty"` - SqlVisualization map[string]*ResourceSqlVisualization `json:"databricks_sql_visualization,omitempty"` - SqlWidget map[string]*ResourceSqlWidget `json:"databricks_sql_widget,omitempty"` - StorageCredential map[string]*ResourceStorageCredential `json:"databricks_storage_credential,omitempty"` - SystemSchema map[string]*ResourceSystemSchema `json:"databricks_system_schema,omitempty"` - Table map[string]*ResourceTable `json:"databricks_table,omitempty"` - Token map[string]*ResourceToken `json:"databricks_token,omitempty"` - User map[string]*ResourceUser `json:"databricks_user,omitempty"` - UserInstanceProfile map[string]*ResourceUserInstanceProfile `json:"databricks_user_instance_profile,omitempty"` - UserRole map[string]*ResourceUserRole `json:"databricks_user_role,omitempty"` - Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"` - WorkspaceConf map[string]*ResourceWorkspaceConf `json:"databricks_workspace_conf,omitempty"` - WorkspaceFile map[string]*ResourceWorkspaceFile `json:"databricks_workspace_file,omitempty"` + AccessControlRuleSet map[string]any `json:"databricks_access_control_rule_set,omitempty"` + ArtifactAllowlist map[string]any `json:"databricks_artifact_allowlist,omitempty"` + AutomaticClusterUpdateWorkspaceSetting map[string]any `json:"databricks_automatic_cluster_update_workspace_setting,omitempty"` + AwsS3Mount map[string]any `json:"databricks_aws_s3_mount,omitempty"` + AzureAdlsGen1Mount map[string]any `json:"databricks_azure_adls_gen1_mount,omitempty"` + AzureAdlsGen2Mount map[string]any `json:"databricks_azure_adls_gen2_mount,omitempty"` + AzureBlobMount map[string]any `json:"databricks_azure_blob_mount,omitempty"` + Catalog map[string]any `json:"databricks_catalog,omitempty"` + CatalogWorkspaceBinding map[string]any `json:"databricks_catalog_workspace_binding,omitempty"` + Cluster map[string]any `json:"databricks_cluster,omitempty"` + ClusterPolicy map[string]any `json:"databricks_cluster_policy,omitempty"` + ComplianceSecurityProfileWorkspaceSetting map[string]any `json:"databricks_compliance_security_profile_workspace_setting,omitempty"` + Connection map[string]any `json:"databricks_connection,omitempty"` + DbfsFile map[string]any `json:"databricks_dbfs_file,omitempty"` + DefaultNamespaceSetting map[string]any `json:"databricks_default_namespace_setting,omitempty"` + Directory map[string]any `json:"databricks_directory,omitempty"` + EnhancedSecurityMonitoringWorkspaceSetting map[string]any `json:"databricks_enhanced_security_monitoring_workspace_setting,omitempty"` + Entitlements map[string]any `json:"databricks_entitlements,omitempty"` + ExternalLocation map[string]any `json:"databricks_external_location,omitempty"` + File map[string]any `json:"databricks_file,omitempty"` + GitCredential map[string]any `json:"databricks_git_credential,omitempty"` + GlobalInitScript map[string]any `json:"databricks_global_init_script,omitempty"` + Grant map[string]any `json:"databricks_grant,omitempty"` + Grants map[string]any `json:"databricks_grants,omitempty"` + Group 
map[string]any `json:"databricks_group,omitempty"` + GroupInstanceProfile map[string]any `json:"databricks_group_instance_profile,omitempty"` + GroupMember map[string]any `json:"databricks_group_member,omitempty"` + GroupRole map[string]any `json:"databricks_group_role,omitempty"` + InstancePool map[string]any `json:"databricks_instance_pool,omitempty"` + InstanceProfile map[string]any `json:"databricks_instance_profile,omitempty"` + IpAccessList map[string]any `json:"databricks_ip_access_list,omitempty"` + Job map[string]any `json:"databricks_job,omitempty"` + LakehouseMonitor map[string]any `json:"databricks_lakehouse_monitor,omitempty"` + Library map[string]any `json:"databricks_library,omitempty"` + Metastore map[string]any `json:"databricks_metastore,omitempty"` + MetastoreAssignment map[string]any `json:"databricks_metastore_assignment,omitempty"` + MetastoreDataAccess map[string]any `json:"databricks_metastore_data_access,omitempty"` + MlflowExperiment map[string]any `json:"databricks_mlflow_experiment,omitempty"` + MlflowModel map[string]any `json:"databricks_mlflow_model,omitempty"` + MlflowWebhook map[string]any `json:"databricks_mlflow_webhook,omitempty"` + ModelServing map[string]any `json:"databricks_model_serving,omitempty"` + Mount map[string]any `json:"databricks_mount,omitempty"` + MwsCredentials map[string]any `json:"databricks_mws_credentials,omitempty"` + MwsCustomerManagedKeys map[string]any `json:"databricks_mws_customer_managed_keys,omitempty"` + MwsLogDelivery map[string]any `json:"databricks_mws_log_delivery,omitempty"` + MwsNccBinding map[string]any `json:"databricks_mws_ncc_binding,omitempty"` + MwsNccPrivateEndpointRule map[string]any `json:"databricks_mws_ncc_private_endpoint_rule,omitempty"` + MwsNetworkConnectivityConfig map[string]any `json:"databricks_mws_network_connectivity_config,omitempty"` + MwsNetworks map[string]any `json:"databricks_mws_networks,omitempty"` + MwsPermissionAssignment map[string]any `json:"databricks_mws_permission_assignment,omitempty"` + MwsPrivateAccessSettings map[string]any `json:"databricks_mws_private_access_settings,omitempty"` + MwsStorageConfigurations map[string]any `json:"databricks_mws_storage_configurations,omitempty"` + MwsVpcEndpoint map[string]any `json:"databricks_mws_vpc_endpoint,omitempty"` + MwsWorkspaces map[string]any `json:"databricks_mws_workspaces,omitempty"` + Notebook map[string]any `json:"databricks_notebook,omitempty"` + OboToken map[string]any `json:"databricks_obo_token,omitempty"` + OnlineTable map[string]any `json:"databricks_online_table,omitempty"` + PermissionAssignment map[string]any `json:"databricks_permission_assignment,omitempty"` + Permissions map[string]any `json:"databricks_permissions,omitempty"` + Pipeline map[string]any `json:"databricks_pipeline,omitempty"` + Provider map[string]any `json:"databricks_provider,omitempty"` + QualityMonitor map[string]any `json:"databricks_quality_monitor,omitempty"` + Recipient map[string]any `json:"databricks_recipient,omitempty"` + RegisteredModel map[string]any `json:"databricks_registered_model,omitempty"` + Repo map[string]any `json:"databricks_repo,omitempty"` + RestrictWorkspaceAdminsSetting map[string]any `json:"databricks_restrict_workspace_admins_setting,omitempty"` + Schema map[string]any `json:"databricks_schema,omitempty"` + Secret map[string]any `json:"databricks_secret,omitempty"` + SecretAcl map[string]any `json:"databricks_secret_acl,omitempty"` + SecretScope map[string]any `json:"databricks_secret_scope,omitempty"` + ServicePrincipal 
map[string]any `json:"databricks_service_principal,omitempty"` + ServicePrincipalRole map[string]any `json:"databricks_service_principal_role,omitempty"` + ServicePrincipalSecret map[string]any `json:"databricks_service_principal_secret,omitempty"` + Share map[string]any `json:"databricks_share,omitempty"` + SqlAlert map[string]any `json:"databricks_sql_alert,omitempty"` + SqlDashboard map[string]any `json:"databricks_sql_dashboard,omitempty"` + SqlEndpoint map[string]any `json:"databricks_sql_endpoint,omitempty"` + SqlGlobalConfig map[string]any `json:"databricks_sql_global_config,omitempty"` + SqlPermissions map[string]any `json:"databricks_sql_permissions,omitempty"` + SqlQuery map[string]any `json:"databricks_sql_query,omitempty"` + SqlTable map[string]any `json:"databricks_sql_table,omitempty"` + SqlVisualization map[string]any `json:"databricks_sql_visualization,omitempty"` + SqlWidget map[string]any `json:"databricks_sql_widget,omitempty"` + StorageCredential map[string]any `json:"databricks_storage_credential,omitempty"` + SystemSchema map[string]any `json:"databricks_system_schema,omitempty"` + Table map[string]any `json:"databricks_table,omitempty"` + Token map[string]any `json:"databricks_token,omitempty"` + User map[string]any `json:"databricks_user,omitempty"` + UserInstanceProfile map[string]any `json:"databricks_user_instance_profile,omitempty"` + UserRole map[string]any `json:"databricks_user_role,omitempty"` + VectorSearchEndpoint map[string]any `json:"databricks_vector_search_endpoint,omitempty"` + VectorSearchIndex map[string]any `json:"databricks_vector_search_index,omitempty"` + Volume map[string]any `json:"databricks_volume,omitempty"` + WorkspaceConf map[string]any `json:"databricks_workspace_conf,omitempty"` + WorkspaceFile map[string]any `json:"databricks_workspace_file,omitempty"` } func NewResources() *Resources { return &Resources{ - AccessControlRuleSet: make(map[string]*ResourceAccessControlRuleSet), - ArtifactAllowlist: make(map[string]*ResourceArtifactAllowlist), - AwsS3Mount: make(map[string]*ResourceAwsS3Mount), - AzureAdlsGen1Mount: make(map[string]*ResourceAzureAdlsGen1Mount), - AzureAdlsGen2Mount: make(map[string]*ResourceAzureAdlsGen2Mount), - AzureBlobMount: make(map[string]*ResourceAzureBlobMount), - Catalog: make(map[string]*ResourceCatalog), - CatalogWorkspaceBinding: make(map[string]*ResourceCatalogWorkspaceBinding), - Cluster: make(map[string]*ResourceCluster), - ClusterPolicy: make(map[string]*ResourceClusterPolicy), - Connection: make(map[string]*ResourceConnection), - DbfsFile: make(map[string]*ResourceDbfsFile), - DefaultNamespaceSetting: make(map[string]*ResourceDefaultNamespaceSetting), - Directory: make(map[string]*ResourceDirectory), - Entitlements: make(map[string]*ResourceEntitlements), - ExternalLocation: make(map[string]*ResourceExternalLocation), - GitCredential: make(map[string]*ResourceGitCredential), - GlobalInitScript: make(map[string]*ResourceGlobalInitScript), - Grants: make(map[string]*ResourceGrants), - Group: make(map[string]*ResourceGroup), - GroupInstanceProfile: make(map[string]*ResourceGroupInstanceProfile), - GroupMember: make(map[string]*ResourceGroupMember), - GroupRole: make(map[string]*ResourceGroupRole), - InstancePool: make(map[string]*ResourceInstancePool), - InstanceProfile: make(map[string]*ResourceInstanceProfile), - IpAccessList: make(map[string]*ResourceIpAccessList), - Job: make(map[string]*ResourceJob), - Library: make(map[string]*ResourceLibrary), - Metastore: make(map[string]*ResourceMetastore), - 
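With the resource maps now typed as map[string]any (and the NewResources constructor in this hunk updated to match), callers can attach either the generated structs or free-form maps to a resource block. A brief sketch under that assumption:

// Assumes: import "github.com/databricks/cli/bundle/internal/tf/schema"
res := schema.NewResources()

// Either a generated struct or an untyped map can be stored per resource name.
res.Repo["my_repo"] = schema.ResourceRepo{Url: "https://github.com/org/repo"}
res.Job["my_job"] = map[string]any{"name": "example job"}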
MetastoreAssignment: make(map[string]*ResourceMetastoreAssignment), - MetastoreDataAccess: make(map[string]*ResourceMetastoreDataAccess), - MlflowExperiment: make(map[string]*ResourceMlflowExperiment), - MlflowModel: make(map[string]*ResourceMlflowModel), - MlflowWebhook: make(map[string]*ResourceMlflowWebhook), - ModelServing: make(map[string]*ResourceModelServing), - Mount: make(map[string]*ResourceMount), - MwsCredentials: make(map[string]*ResourceMwsCredentials), - MwsCustomerManagedKeys: make(map[string]*ResourceMwsCustomerManagedKeys), - MwsLogDelivery: make(map[string]*ResourceMwsLogDelivery), - MwsNetworks: make(map[string]*ResourceMwsNetworks), - MwsPermissionAssignment: make(map[string]*ResourceMwsPermissionAssignment), - MwsPrivateAccessSettings: make(map[string]*ResourceMwsPrivateAccessSettings), - MwsStorageConfigurations: make(map[string]*ResourceMwsStorageConfigurations), - MwsVpcEndpoint: make(map[string]*ResourceMwsVpcEndpoint), - MwsWorkspaces: make(map[string]*ResourceMwsWorkspaces), - Notebook: make(map[string]*ResourceNotebook), - OboToken: make(map[string]*ResourceOboToken), - PermissionAssignment: make(map[string]*ResourcePermissionAssignment), - Permissions: make(map[string]*ResourcePermissions), - Pipeline: make(map[string]*ResourcePipeline), - Provider: make(map[string]*ResourceProvider), - Recipient: make(map[string]*ResourceRecipient), - RegisteredModel: make(map[string]*ResourceRegisteredModel), - Repo: make(map[string]*ResourceRepo), - Schema: make(map[string]*ResourceSchema), - Secret: make(map[string]*ResourceSecret), - SecretAcl: make(map[string]*ResourceSecretAcl), - SecretScope: make(map[string]*ResourceSecretScope), - ServicePrincipal: make(map[string]*ResourceServicePrincipal), - ServicePrincipalRole: make(map[string]*ResourceServicePrincipalRole), - ServicePrincipalSecret: make(map[string]*ResourceServicePrincipalSecret), - Share: make(map[string]*ResourceShare), - SqlAlert: make(map[string]*ResourceSqlAlert), - SqlDashboard: make(map[string]*ResourceSqlDashboard), - SqlEndpoint: make(map[string]*ResourceSqlEndpoint), - SqlGlobalConfig: make(map[string]*ResourceSqlGlobalConfig), - SqlPermissions: make(map[string]*ResourceSqlPermissions), - SqlQuery: make(map[string]*ResourceSqlQuery), - SqlTable: make(map[string]*ResourceSqlTable), - SqlVisualization: make(map[string]*ResourceSqlVisualization), - SqlWidget: make(map[string]*ResourceSqlWidget), - StorageCredential: make(map[string]*ResourceStorageCredential), - SystemSchema: make(map[string]*ResourceSystemSchema), - Table: make(map[string]*ResourceTable), - Token: make(map[string]*ResourceToken), - User: make(map[string]*ResourceUser), - UserInstanceProfile: make(map[string]*ResourceUserInstanceProfile), - UserRole: make(map[string]*ResourceUserRole), - Volume: make(map[string]*ResourceVolume), - WorkspaceConf: make(map[string]*ResourceWorkspaceConf), - WorkspaceFile: make(map[string]*ResourceWorkspaceFile), + AccessControlRuleSet: make(map[string]any), + ArtifactAllowlist: make(map[string]any), + AutomaticClusterUpdateWorkspaceSetting: make(map[string]any), + AwsS3Mount: make(map[string]any), + AzureAdlsGen1Mount: make(map[string]any), + AzureAdlsGen2Mount: make(map[string]any), + AzureBlobMount: make(map[string]any), + Catalog: make(map[string]any), + CatalogWorkspaceBinding: make(map[string]any), + Cluster: make(map[string]any), + ClusterPolicy: make(map[string]any), + ComplianceSecurityProfileWorkspaceSetting: make(map[string]any), + Connection: make(map[string]any), + DbfsFile: 
make(map[string]any), + DefaultNamespaceSetting: make(map[string]any), + Directory: make(map[string]any), + EnhancedSecurityMonitoringWorkspaceSetting: make(map[string]any), + Entitlements: make(map[string]any), + ExternalLocation: make(map[string]any), + File: make(map[string]any), + GitCredential: make(map[string]any), + GlobalInitScript: make(map[string]any), + Grant: make(map[string]any), + Grants: make(map[string]any), + Group: make(map[string]any), + GroupInstanceProfile: make(map[string]any), + GroupMember: make(map[string]any), + GroupRole: make(map[string]any), + InstancePool: make(map[string]any), + InstanceProfile: make(map[string]any), + IpAccessList: make(map[string]any), + Job: make(map[string]any), + LakehouseMonitor: make(map[string]any), + Library: make(map[string]any), + Metastore: make(map[string]any), + MetastoreAssignment: make(map[string]any), + MetastoreDataAccess: make(map[string]any), + MlflowExperiment: make(map[string]any), + MlflowModel: make(map[string]any), + MlflowWebhook: make(map[string]any), + ModelServing: make(map[string]any), + Mount: make(map[string]any), + MwsCredentials: make(map[string]any), + MwsCustomerManagedKeys: make(map[string]any), + MwsLogDelivery: make(map[string]any), + MwsNccBinding: make(map[string]any), + MwsNccPrivateEndpointRule: make(map[string]any), + MwsNetworkConnectivityConfig: make(map[string]any), + MwsNetworks: make(map[string]any), + MwsPermissionAssignment: make(map[string]any), + MwsPrivateAccessSettings: make(map[string]any), + MwsStorageConfigurations: make(map[string]any), + MwsVpcEndpoint: make(map[string]any), + MwsWorkspaces: make(map[string]any), + Notebook: make(map[string]any), + OboToken: make(map[string]any), + OnlineTable: make(map[string]any), + PermissionAssignment: make(map[string]any), + Permissions: make(map[string]any), + Pipeline: make(map[string]any), + Provider: make(map[string]any), + QualityMonitor: make(map[string]any), + Recipient: make(map[string]any), + RegisteredModel: make(map[string]any), + Repo: make(map[string]any), + RestrictWorkspaceAdminsSetting: make(map[string]any), + Schema: make(map[string]any), + Secret: make(map[string]any), + SecretAcl: make(map[string]any), + SecretScope: make(map[string]any), + ServicePrincipal: make(map[string]any), + ServicePrincipalRole: make(map[string]any), + ServicePrincipalSecret: make(map[string]any), + Share: make(map[string]any), + SqlAlert: make(map[string]any), + SqlDashboard: make(map[string]any), + SqlEndpoint: make(map[string]any), + SqlGlobalConfig: make(map[string]any), + SqlPermissions: make(map[string]any), + SqlQuery: make(map[string]any), + SqlTable: make(map[string]any), + SqlVisualization: make(map[string]any), + SqlWidget: make(map[string]any), + StorageCredential: make(map[string]any), + SystemSchema: make(map[string]any), + Table: make(map[string]any), + Token: make(map[string]any), + User: make(map[string]any), + UserInstanceProfile: make(map[string]any), + UserRole: make(map[string]any), + VectorSearchEndpoint: make(map[string]any), + VectorSearchIndex: make(map[string]any), + Volume: make(map[string]any), + WorkspaceConf: make(map[string]any), + WorkspaceFile: make(map[string]any), } } diff --git a/bundle/internal/tf/schema/root.go b/bundle/internal/tf/schema/root.go index 937182d4d..39db3ea2f 100644 --- a/bundle/internal/tf/schema/root.go +++ b/bundle/internal/tf/schema/root.go @@ -19,13 +19,17 @@ type Root struct { Resource *Resources `json:"resource,omitempty"` } +const ProviderHost = "registry.terraform.io" +const ProviderSource = 
"databricks/databricks" +const ProviderVersion = "1.48.0" + func NewRoot() *Root { return &Root{ Terraform: map[string]interface{}{ "required_providers": map[string]interface{}{ "databricks": map[string]interface{}{ - "source": "databricks/databricks", - "version": "1.31.1", + "source": ProviderSource, + "version": ProviderVersion, }, }, }, diff --git a/bundle/libraries/helpers.go b/bundle/libraries/helpers.go new file mode 100644 index 000000000..89679c91a --- /dev/null +++ b/bundle/libraries/helpers.go @@ -0,0 +1,16 @@ +package libraries + +import "github.com/databricks/databricks-sdk-go/service/compute" + +func libraryPath(library *compute.Library) string { + if library.Whl != "" { + return library.Whl + } + if library.Jar != "" { + return library.Jar + } + if library.Egg != "" { + return library.Egg + } + return "" +} diff --git a/bundle/libraries/helpers_test.go b/bundle/libraries/helpers_test.go new file mode 100644 index 000000000..adc20a246 --- /dev/null +++ b/bundle/libraries/helpers_test.go @@ -0,0 +1,17 @@ +package libraries + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" +) + +func TestLibraryPath(t *testing.T) { + path := "/some/path" + + assert.Equal(t, path, libraryPath(&compute.Library{Whl: path})) + assert.Equal(t, path, libraryPath(&compute.Library{Jar: path})) + assert.Equal(t, path, libraryPath(&compute.Library{Egg: path})) + assert.Equal(t, "", libraryPath(&compute.Library{})) +} diff --git a/bundle/libraries/libraries.go b/bundle/libraries/libraries.go index 548d5ef1b..84ead052b 100644 --- a/bundle/libraries/libraries.go +++ b/bundle/libraries/libraries.go @@ -1,77 +1,77 @@ package libraries import ( - "context" - "fmt" - "net/url" - "path" - "path/filepath" - "strings" - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" ) -type match struct { -} - -func MatchWithArtifacts() bundle.Mutator { - return &match{} -} - -func (a *match) Name() string { - return "libraries.MatchWithArtifacts" -} - -func (a *match) Apply(ctx context.Context, b *bundle.Bundle) error { - tasks := findAllTasks(b) - for _, task := range tasks { - if isMissingRequiredLibraries(task) { - return fmt.Errorf("task '%s' is missing required libraries. Please include your package code in task libraries block", task.TaskKey) - } - for j := range task.Libraries { - lib := &task.Libraries[j] - err := findArtifactsAndMarkForUpload(ctx, lib, b) - if err != nil { - return err - } - } - } - return nil -} - -func findAllTasks(b *bundle.Bundle) []*jobs.Task { +func findAllTasks(b *bundle.Bundle) map[string]([]jobs.Task) { r := b.Config.Resources - result := make([]*jobs.Task, 0) + result := make(map[string]([]jobs.Task), 0) for k := range b.Config.Resources.Jobs { - tasks := r.Jobs[k].JobSettings.Tasks - for i := range tasks { - task := &tasks[i] - result = append(result, task) - } + result[k] = append(result[k], r.Jobs[k].JobSettings.Tasks...) 
} return result } +func FindAllEnvironments(b *bundle.Bundle) map[string]([]jobs.JobEnvironment) { + jobEnvs := make(map[string]([]jobs.JobEnvironment), 0) + for jobKey, job := range b.Config.Resources.Jobs { + if len(job.Environments) == 0 { + continue + } + + jobEnvs[jobKey] = job.Environments + } + + return jobEnvs +} + +func isEnvsWithLocalLibraries(envs []jobs.JobEnvironment) bool { + for _, e := range envs { + if e.Spec == nil { + continue + } + + for _, l := range e.Spec.Dependencies { + if IsEnvironmentDependencyLocal(l) { + return true + } + } + } + + return false +} + func FindAllWheelTasksWithLocalLibraries(b *bundle.Bundle) []*jobs.Task { tasks := findAllTasks(b) + envs := FindAllEnvironments(b) + wheelTasks := make([]*jobs.Task, 0) - for _, task := range tasks { - if task.PythonWheelTask != nil && IsTaskWithLocalLibraries(task) { - wheelTasks = append(wheelTasks, task) + for k, jobTasks := range tasks { + for i := range jobTasks { + task := &jobTasks[i] + if task.PythonWheelTask == nil { + continue + } + + if isTaskWithLocalLibraries(*task) { + wheelTasks = append(wheelTasks, task) + } + + if envs[k] != nil && isEnvsWithLocalLibraries(envs[k]) { + wheelTasks = append(wheelTasks, task) + } } } return wheelTasks } -func IsTaskWithLocalLibraries(task *jobs.Task) bool { +func isTaskWithLocalLibraries(task jobs.Task) bool { for _, l := range task.Libraries { - if isLocalLibrary(&l) { + if IsLocalLibrary(&l) { return true } } @@ -79,130 +79,12 @@ func IsTaskWithLocalLibraries(task *jobs.Task) bool { return false } -func IsTaskWithWorkspaceLibraries(task *jobs.Task) bool { +func IsTaskWithWorkspaceLibraries(task jobs.Task) bool { for _, l := range task.Libraries { - path := libPath(&l) - if isWorkspacePath(path) { + if IsWorkspaceLibrary(&l) { return true } } return false } - -func isMissingRequiredLibraries(task *jobs.Task) bool { - if task.Libraries != nil { - return false - } - - return task.PythonWheelTask != nil || task.SparkJarTask != nil -} - -func findLibraryMatches(lib *compute.Library, b *bundle.Bundle) ([]string, error) { - path := libPath(lib) - if path == "" { - return nil, nil - } - - fullPath := filepath.Join(b.Config.Path, path) - return filepath.Glob(fullPath) -} - -func findArtifactsAndMarkForUpload(ctx context.Context, lib *compute.Library, b *bundle.Bundle) error { - matches, err := findLibraryMatches(lib, b) - if err != nil { - return err - } - - if len(matches) == 0 && isLocalLibrary(lib) { - return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libPath(lib)) - } - - for _, match := range matches { - af, err := findArtifactFileByLocalPath(match, b) - if err != nil { - cmdio.LogString(ctx, fmt.Sprintf("%s. Skipping uploading. 
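A sketch of what the reworked lookup above now picks up: a Python wheel task is selected even when the local dependency is declared on the job's environments rather than on the task's own libraries. The fixture mirrors the tests added later in this diff; all identifiers are made up.

// Assumes the same imports as the tests in this diff (bundle, config, resources,
// compute, jobs) plus "github.com/databricks/cli/bundle/libraries".
b := &bundle.Bundle{
	Config: config.Root{
		Resources: config.Resources{
			Jobs: map[string]*resources.Job{
				"my_job": {
					JobSettings: &jobs.JobSettings{
						Tasks: []jobs.Task{
							{TaskKey: "wheel_task", PythonWheelTask: &jobs.PythonWheelTask{}},
						},
						Environments: []jobs.JobEnvironment{
							{Spec: &compute.Environment{Dependencies: []string{"./dist/my_project.whl"}}},
						},
					},
				},
			},
		},
	},
}

// Contains a pointer to "wheel_task": the job's environment declares a
// local (dot-prefixed) dependency, so the wheel task needs a built artifact.
wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
_ = wheelTasks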
In order to use the define 'artifacts' section", err.Error())) - } else { - af.Libraries = append(af.Libraries, lib) - } - } - - return nil -} - -func findArtifactFileByLocalPath(path string, b *bundle.Bundle) (*config.ArtifactFile, error) { - for _, a := range b.Config.Artifacts { - for k := range a.Files { - if a.Files[k].Source == path { - return &a.Files[k], nil - } - } - } - - return nil, fmt.Errorf("artifact section is not defined for file at %s", path) -} - -func libPath(library *compute.Library) string { - if library.Whl != "" { - return library.Whl - } - if library.Jar != "" { - return library.Jar - } - if library.Egg != "" { - return library.Egg - } - - return "" -} - -func isLocalLibrary(library *compute.Library) bool { - path := libPath(library) - if path == "" { - return false - } - - return IsLocalPath(path) -} - -func IsLocalPath(path string) bool { - if isExplicitFileScheme(path) { - return true - } - - if isRemoteStorageScheme(path) { - return false - } - - return !isAbsoluteRemotePath(path) -} - -func isExplicitFileScheme(path string) bool { - return strings.HasPrefix(path, "file://") -} - -func isRemoteStorageScheme(path string) bool { - url, err := url.Parse(path) - if err != nil { - return false - } - - if url.Scheme == "" { - return false - } - - // If the path starts with scheme:/ format, it's a correct remote storage scheme - return strings.HasPrefix(path, url.Scheme+":/") - -} - -func isWorkspacePath(path string) bool { - return strings.HasPrefix(path, "/Workspace/") || - strings.HasPrefix(path, "/Users/") || - strings.HasPrefix(path, "/Shared/") -} - -func isAbsoluteRemotePath(p string) bool { - // If path for library starts with /, it's a remote absolute path - return path.IsAbs(p) -} diff --git a/bundle/libraries/libraries_test.go b/bundle/libraries/libraries_test.go deleted file mode 100644 index 41609bd4e..000000000 --- a/bundle/libraries/libraries_test.go +++ /dev/null @@ -1,31 +0,0 @@ -package libraries - -import ( - "fmt" - "testing" - - "github.com/databricks/databricks-sdk-go/service/compute" - "github.com/stretchr/testify/require" -) - -var testCases map[string]bool = map[string]bool{ - "./some/local/path": true, - "/some/full/path": false, - "/Workspace/path/to/package": false, - "/Users/path/to/package": false, - "file://path/to/package": true, - "C:\\path\\to\\package": true, - "dbfs://path/to/package": false, - "dbfs:/path/to/package": false, - "s3://path/to/package": false, - "abfss://path/to/package": false, -} - -func TestIsLocalLbrary(t *testing.T) { - for p, result := range testCases { - lib := compute.Library{ - Whl: p, - } - require.Equal(t, result, isLocalLibrary(&lib), fmt.Sprintf("isLocalLibrary must return %t for path %s ", result, p)) - } -} diff --git a/bundle/libraries/local_path.go b/bundle/libraries/local_path.go new file mode 100644 index 000000000..f1e3788f2 --- /dev/null +++ b/bundle/libraries/local_path.go @@ -0,0 +1,82 @@ +package libraries + +import ( + "net/url" + "path" + "strings" + + "github.com/databricks/databricks-sdk-go/service/compute" +) + +// IsLocalPath returns true if the specified path indicates that +// it should be interpreted as a path on the local file system. 
+
+//
+// The following paths are considered local:
+//
+// - myfile.txt
+// - ./myfile.txt
+// - ../myfile.txt
+// - file:///foo/bar/myfile.txt
+//
+// The following paths are considered remote:
+//
+// - dbfs:/mnt/myfile.txt
+// - s3:/mybucket/myfile.txt
+// - /Users/jane@doe.com/myfile.txt
+func IsLocalPath(p string) bool {
+	// If the path has the explicit file scheme, it's a local path.
+	if strings.HasPrefix(p, "file://") {
+		return true
+	}
+
+	// If the path has another scheme, it's a remote path.
+	if isRemoteStorageScheme(p) {
+		return false
+	}
+
+	// If path starts with /, it's a remote absolute path
+	return !path.IsAbs(p)
+}
+
+// IsEnvironmentDependencyLocal returns true if the specified dependency
+// should be interpreted as a local path.
+// We use this to check if the dependency in environment spec is local.
+// We can't use IsLocalPath because environment dependencies can be
+// a pypi package name which can be misinterpreted as a local path by IsLocalPath.
+func IsEnvironmentDependencyLocal(dep string) bool {
+	possiblePrefixes := []string{
+		".",
+	}
+
+	for _, prefix := range possiblePrefixes {
+		if strings.HasPrefix(dep, prefix) {
+			return true
+		}
+	}
+
+	return false
+}
+
+func isRemoteStorageScheme(path string) bool {
+	url, err := url.Parse(path)
+	if err != nil {
+		return false
+	}
+
+	if url.Scheme == "" {
+		return false
+	}
+
+	// If the path starts with scheme:/ format, it's a correct remote storage scheme
+	return strings.HasPrefix(path, url.Scheme+":/")
+}
+
+// IsLocalLibrary returns true if the specified library refers to a local path.
+func IsLocalLibrary(library *compute.Library) bool {
+	path := libraryPath(library)
+	if path == "" {
+		return false
+	}
+
+	return IsLocalPath(path)
+}
diff --git a/bundle/libraries/local_path_test.go b/bundle/libraries/local_path_test.go
new file mode 100644
index 000000000..d2492d6b1
--- /dev/null
+++ b/bundle/libraries/local_path_test.go
@@ -0,0 +1,72 @@
+package libraries
+
+import (
+	"testing"
+
+	"github.com/databricks/databricks-sdk-go/service/compute"
+	"github.com/stretchr/testify/assert"
+	"github.com/stretchr/testify/require"
+)
+
+func TestIsLocalPath(t *testing.T) {
+	// Relative paths, paths with the file scheme, and Windows paths.
+	assert.True(t, IsLocalPath("./some/local/path"))
+	assert.True(t, IsLocalPath("file://path/to/package"))
+	assert.True(t, IsLocalPath("C:\\path\\to\\package"))
+	assert.True(t, IsLocalPath("myfile.txt"))
+	assert.True(t, IsLocalPath("./myfile.txt"))
+	assert.True(t, IsLocalPath("../myfile.txt"))
+	assert.True(t, IsLocalPath("file:///foo/bar/myfile.txt"))
+
+	// Absolute paths.
+	assert.False(t, IsLocalPath("/some/full/path"))
+	assert.False(t, IsLocalPath("/Workspace/path/to/package"))
+	assert.False(t, IsLocalPath("/Users/path/to/package"))
+
+	// Paths with schemes.
+	assert.False(t, IsLocalPath("dbfs://path/to/package"))
+	assert.False(t, IsLocalPath("dbfs:/path/to/package"))
+	assert.False(t, IsLocalPath("s3://path/to/package"))
+	assert.False(t, IsLocalPath("abfss://path/to/package"))
+}
+
+func TestIsLocalLibrary(t *testing.T) {
+	// Local paths.
+	assert.True(t, IsLocalLibrary(&compute.Library{Whl: "./file.whl"}))
+	assert.True(t, IsLocalLibrary(&compute.Library{Jar: "../target/some.jar"}))
+
+	// Non-local paths.
+	assert.False(t, IsLocalLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"}))
+	assert.False(t, IsLocalLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"}))
+
+	// Empty.
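The difference between the two predicates above is worth making concrete: a bare PyPI requirement has no scheme and is not absolute, so IsLocalPath would classify it as local, while IsEnvironmentDependencyLocal only treats dot-prefixed paths as local. A small sketch, assuming the fmt package and an import of this libraries package:

fmt.Println(libraries.IsLocalPath("simplejson"))                    // true: looks like a relative file path
fmt.Println(libraries.IsEnvironmentDependencyLocal("simplejson"))   // false: treated as a PyPI package name
fmt.Println(libraries.IsEnvironmentDependencyLocal("./dist/x.whl")) // true: dot-prefixed, must exist locally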
+ assert.False(t, IsLocalLibrary(&compute.Library{})) +} + +func TestIsEnvironmentDependencyLocal(t *testing.T) { + testCases := [](struct { + path string + expected bool + }){ + {path: "./local/*.whl", expected: true}, + {path: ".\\local\\*.whl", expected: true}, + {path: "./local/mypath.whl", expected: true}, + {path: ".\\local\\mypath.whl", expected: true}, + {path: "../local/*.whl", expected: true}, + {path: "..\\local\\*.whl", expected: true}, + {path: "./../local/*.whl", expected: true}, + {path: ".\\..\\local\\*.whl", expected: true}, + {path: "../../local/*.whl", expected: true}, + {path: "..\\..\\local\\*.whl", expected: true}, + {path: "pypipackage", expected: false}, + {path: "pypipackage/test.whl", expected: false}, + {path: "pypipackage/*.whl", expected: false}, + {path: "/Volumes/catalog/schema/volume/path.whl", expected: false}, + {path: "/Workspace/my_project/dist.whl", expected: false}, + {path: "-r /Workspace/my_project/requirements.txt", expected: false}, + } + + for _, tc := range testCases { + require.Equal(t, IsEnvironmentDependencyLocal(tc.path), tc.expected) + } +} diff --git a/bundle/libraries/match.go b/bundle/libraries/match.go new file mode 100644 index 000000000..4feb4225d --- /dev/null +++ b/bundle/libraries/match.go @@ -0,0 +1,82 @@ +package libraries + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" +) + +type match struct { +} + +func ValidateLocalLibrariesExist() bundle.Mutator { + return &match{} +} + +func (a *match) Name() string { + return "libraries.ValidateLocalLibrariesExist" +} + +func (a *match) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + for _, job := range b.Config.Resources.Jobs { + err := validateEnvironments(job.Environments, b) + if err != nil { + return diag.FromErr(err) + } + + for _, task := range job.JobSettings.Tasks { + err := validateTaskLibraries(task.Libraries, b) + if err != nil { + return diag.FromErr(err) + } + } + } + + return nil +} + +func validateTaskLibraries(libs []compute.Library, b *bundle.Bundle) error { + for _, lib := range libs { + path := libraryPath(&lib) + if path == "" || !IsLocalPath(path) { + continue + } + + matches, err := filepath.Glob(filepath.Join(b.RootPath, path)) + if err != nil { + return err + } + + if len(matches) == 0 { + return fmt.Errorf("file %s is referenced in libraries section but doesn't exist on the local file system", libraryPath(&lib)) + } + } + + return nil +} + +func validateEnvironments(envs []jobs.JobEnvironment, b *bundle.Bundle) error { + for _, env := range envs { + if env.Spec == nil { + continue + } + + for _, dep := range env.Spec.Dependencies { + matches, err := filepath.Glob(filepath.Join(b.RootPath, dep)) + if err != nil { + return err + } + + if len(matches) == 0 && IsEnvironmentDependencyLocal(dep) { + return fmt.Errorf("file %s is referenced in environments section but doesn't exist on the local file system", dep) + } + } + } + + return nil +} diff --git a/bundle/libraries/match_test.go b/bundle/libraries/match_test.go new file mode 100644 index 000000000..bb4b15107 --- /dev/null +++ b/bundle/libraries/match_test.go @@ -0,0 +1,148 @@ +package libraries + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + 
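A sketch of how the new validator composes with other mutators from this change; ctx and b are assumed to exist, and this particular ordering is illustrative rather than the CLI's actual phase definition.

// Assumed imports: the bundle, libraries, and permissions packages from this repository.
func validateBundle(ctx context.Context, b *bundle.Bundle) error {
	diags := bundle.Apply(ctx, b, bundle.Seq(
		libraries.ValidateLocalLibrariesExist(),
		// FilterCurrentUser assumes workspace.current_user was resolved earlier.
		permissions.FilterCurrentUser(),
	))
	return diags.Error()
}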
"github.com/databricks/cli/internal/testutil" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/require" +) + +func TestValidateEnvironments(t *testing.T) { + tmpDir := t.TempDir() + testutil.Touch(t, tmpDir, "wheel.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "./wheel.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Nil(t, diags) +} + +func TestValidateEnvironmentsNoFile(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "./wheel.whl", + "simplejson", + "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Len(t, diags, 1) + require.Equal(t, "file ./wheel.whl is referenced in environments section but doesn't exist on the local file system", diags[0].Summary) +} + +func TestValidateTaskLibraries(t *testing.T) { + tmpDir := t.TempDir() + testutil.Touch(t, tmpDir, "wheel.whl") + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "./wheel.whl", + }, + { + Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Nil(t, diags) +} + +func TestValidateTaskLibrariesNoFile(t *testing.T) { + tmpDir := t.TempDir() + + b := &bundle.Bundle{ + RootPath: tmpDir, + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "./wheel.whl", + }, + { + Whl: "/Workspace/Users/foo@bar.com/artifacts/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, ValidateLocalLibrariesExist()) + require.Len(t, diags, 1) + require.Equal(t, "file ./wheel.whl is referenced in libraries section but doesn't exist on the local file system", diags[0].Summary) +} diff --git a/bundle/libraries/testdata/library1 b/bundle/libraries/testdata/library1 new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/libraries/testdata/library2 b/bundle/libraries/testdata/library2 new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/libraries/workspace_path.go b/bundle/libraries/workspace_path.go new file mode 100644 index 000000000..b08ca1616 --- /dev/null +++ b/bundle/libraries/workspace_path.go @@ -0,0 +1,38 @@ +package libraries + +import ( + "strings" + + "github.com/databricks/databricks-sdk-go/service/compute" +) + +// IsWorkspacePath returns true if the specified path indicates that +// it 
should be interpreted as a Databricks Workspace path. +// +// The following paths are considered workspace paths: +// +// - /Workspace/Users/jane@doe.com/myfile +// - /Users/jane@doe.com/myfile +// - /Shared/project/myfile +// +// The following paths are not considered workspace paths: +// +// - myfile.txt +// - ./myfile.txt +// - ../myfile.txt +// - /foo/bar/myfile.txt +func IsWorkspacePath(path string) bool { + return strings.HasPrefix(path, "/Workspace/") || + strings.HasPrefix(path, "/Users/") || + strings.HasPrefix(path, "/Shared/") +} + +// IsWorkspaceLibrary returns true if the specified library refers to a workspace path. +func IsWorkspaceLibrary(library *compute.Library) bool { + path := libraryPath(library) + if path == "" { + return false + } + + return IsWorkspacePath(path) +} diff --git a/bundle/libraries/workspace_path_test.go b/bundle/libraries/workspace_path_test.go new file mode 100644 index 000000000..feaaab7f7 --- /dev/null +++ b/bundle/libraries/workspace_path_test.go @@ -0,0 +1,33 @@ +package libraries + +import ( + "testing" + + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/assert" +) + +func TestIsWorkspacePath(t *testing.T) { + // Absolute paths with particular prefixes. + assert.True(t, IsWorkspacePath("/Workspace/path/to/package")) + assert.True(t, IsWorkspacePath("/Users/path/to/package")) + assert.True(t, IsWorkspacePath("/Shared/path/to/package")) + + // Relative paths. + assert.False(t, IsWorkspacePath("myfile.txt")) + assert.False(t, IsWorkspacePath("./myfile.txt")) + assert.False(t, IsWorkspacePath("../myfile.txt")) +} + +func TestIsWorkspaceLibrary(t *testing.T) { + // Workspace paths. + assert.True(t, IsWorkspaceLibrary(&compute.Library{Whl: "/Workspace/path/to/file.whl"})) + + // Non-workspace paths. + assert.False(t, IsWorkspaceLibrary(&compute.Library{Whl: "./file.whl"})) + assert.False(t, IsWorkspaceLibrary(&compute.Library{Jar: "../target/some.jar"})) + assert.False(t, IsWorkspaceLibrary(&compute.Library{Jar: "s3:/bucket/path/some.jar"})) + + // Empty. + assert.False(t, IsWorkspaceLibrary(&compute.Library{})) +} diff --git a/bundle/log_string.go b/bundle/log_string.go index 63800d6df..f14e3a3ad 100644 --- a/bundle/log_string.go +++ b/bundle/log_string.go @@ -4,6 +4,7 @@ import ( "context" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" ) type LogStringMutator struct { @@ -20,7 +21,7 @@ func LogString(message string) Mutator { } } -func (m *LogStringMutator) Apply(ctx context.Context, b *Bundle) error { +func (m *LogStringMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { cmdio.LogString(ctx, m.message) return nil diff --git a/bundle/mutator.go b/bundle/mutator.go index e559d2375..6c9968aac 100644 --- a/bundle/mutator.go +++ b/bundle/mutator.go @@ -3,6 +3,7 @@ package bundle import ( "context" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -13,18 +14,53 @@ type Mutator interface { Name() string // Apply mutates the specified bundle object. 
-	Apply(context.Context, *Bundle) error
+	Apply(context.Context, *Bundle) diag.Diagnostics
 }
 
-func Apply(ctx context.Context, b *Bundle, m Mutator) error {
+func Apply(ctx context.Context, b *Bundle, m Mutator) diag.Diagnostics {
 	ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator", m.Name()))
 
 	log.Debugf(ctx, "Apply")
-	err := m.Apply(ctx, b)
+
+	err := b.Config.MarkMutatorEntry(ctx)
 	if err != nil {
-		log.Errorf(ctx, "Error: %s", err)
-		return err
+		log.Errorf(ctx, "entry error: %s", err)
+		return diag.Errorf("entry error: %s", err)
 	}
-	return nil
+	defer func() {
+		err := b.Config.MarkMutatorExit(ctx)
+		if err != nil {
+			log.Errorf(ctx, "exit error: %s", err)
+		}
+	}()
+
+	diags := m.Apply(ctx, b)
+
+	// Log error in diagnostics if any.
+	// Note: errors should be logged when constructing them
+	// such that they are not logged multiple times.
+	// If this is done, we can omit this block.
+	if err := diags.Error(); err != nil {
+		log.Errorf(ctx, "Error: %s", err)
+	}
+
+	return diags
+}
+
+type funcMutator struct {
+	fn func(context.Context, *Bundle) diag.Diagnostics
+}
+
+func (m funcMutator) Name() string {
+	return ""
+}
+
+func (m funcMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics {
+	return m.fn(ctx, b)
+}
+
+// ApplyFunc applies an inline-specified function mutator.
+func ApplyFunc(ctx context.Context, b *Bundle, fn func(context.Context, *Bundle) diag.Diagnostics) diag.Diagnostics {
+	return Apply(ctx, b, funcMutator{fn})
 }
diff --git a/bundle/mutator_read_only.go b/bundle/mutator_read_only.go
new file mode 100644
index 000000000..ee4e36e0f
--- /dev/null
+++ b/bundle/mutator_read_only.go
@@ -0,0 +1,29 @@
+package bundle
+
+import (
+	"context"
+
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/log"
+)
+
+// ReadOnlyMutator is the interface type that allows access to bundle configuration but does not allow any mutations.
+type ReadOnlyMutator interface {
+	// Name returns the mutator's name.
+	Name() string
+
+	// Apply accesses the specified read-only bundle object.
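The funcMutator/ApplyFunc pair above allows a one-off mutation without declaring a named type. A minimal sketch, with an invented check and assumed imports of the context, bundle, and diag packages:

func checkCurrentUser(ctx context.Context, b *bundle.Bundle) error {
	diags := bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
		// Invented check: require that the current user has already been resolved.
		if b.Config.Workspace.CurrentUser == nil {
			return diag.Errorf("workspace current user is not resolved")
		}
		return nil
	})
	return diags.Error()
}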
+ Apply(context.Context, ReadOnlyBundle) diag.Diagnostics +} + +func ApplyReadOnly(ctx context.Context, rb ReadOnlyBundle, m ReadOnlyMutator) diag.Diagnostics { + ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator (read-only)", m.Name())) + + log.Debugf(ctx, "ApplyReadOnly") + diags := m.Apply(ctx, rb) + if err := diags.Error(); err != nil { + log.Errorf(ctx, "Error: %s", err) + } + + return diags +} diff --git a/bundle/mutator_test.go b/bundle/mutator_test.go index c1f3c075f..04ff19cff 100644 --- a/bundle/mutator_test.go +++ b/bundle/mutator_test.go @@ -4,6 +4,7 @@ import ( "context" "testing" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/assert" ) @@ -16,7 +17,7 @@ func (t *testMutator) Name() string { return "test" } -func (t *testMutator) Apply(ctx context.Context, b *Bundle) error { +func (t *testMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { t.applyCalled++ return Apply(ctx, b, Seq(t.nestedMutators...)) } @@ -35,8 +36,8 @@ func TestMutator(t *testing.T) { } b := &Bundle{} - err := Apply(context.Background(), b, m) - assert.NoError(t, err) + diags := Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m.applyCalled) assert.Equal(t, 1, nested[0].applyCalled) diff --git a/bundle/parallel.go b/bundle/parallel.go new file mode 100644 index 000000000..ebb91661a --- /dev/null +++ b/bundle/parallel.go @@ -0,0 +1,43 @@ +package bundle + +import ( + "context" + "sync" + + "github.com/databricks/cli/libs/diag" +) + +type parallel struct { + mutators []ReadOnlyMutator +} + +func (m *parallel) Name() string { + return "parallel" +} + +func (m *parallel) Apply(ctx context.Context, rb ReadOnlyBundle) diag.Diagnostics { + var wg sync.WaitGroup + var mu sync.Mutex + var diags diag.Diagnostics + + wg.Add(len(m.mutators)) + for _, mutator := range m.mutators { + go func(mutator ReadOnlyMutator) { + defer wg.Done() + d := ApplyReadOnly(ctx, rb, mutator) + + mu.Lock() + diags = diags.Extend(d) + mu.Unlock() + }(mutator) + } + wg.Wait() + return diags +} + +// Parallel runs the given mutators in parallel. +func Parallel(mutators ...ReadOnlyMutator) ReadOnlyMutator { + return ¶llel{ + mutators: mutators, + } +} diff --git a/bundle/parallel_test.go b/bundle/parallel_test.go new file mode 100644 index 000000000..dfc7ddac9 --- /dev/null +++ b/bundle/parallel_test.go @@ -0,0 +1,82 @@ +package bundle + +import ( + "context" + "sync" + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +type addToContainer struct { + t *testing.T + container *[]int + value int + err bool + + // mu is a mutex that protects container. It is used to ensure that the + // container slice is only modified by one goroutine at a time. 
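A sketch of wiring a custom read-only mutator into the Parallel helper above; the check itself is a stub, and ctx and b in the commented call are assumed:

type pingCheck struct{}

func (pingCheck) Name() string { return "pingCheck" }

func (pingCheck) Apply(ctx context.Context, rb bundle.ReadOnlyBundle) diag.Diagnostics {
	// A real check would inspect rb's configuration; this stub only shows the wiring.
	return nil
}

// Usage (ctx and b assumed): run independent read-only checks concurrently and merge their diagnostics.
// diags := bundle.ApplyReadOnly(ctx, bundle.ReadOnly(b), bundle.Parallel(pingCheck{}, pingCheck{}))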
+	mu *sync.Mutex
+}
+
+func (m *addToContainer) Apply(ctx context.Context, b ReadOnlyBundle) diag.Diagnostics {
+	if m.err {
+		return diag.Errorf("error")
+	}
+
+	m.mu.Lock()
+	*m.container = append(*m.container, m.value)
+	m.mu.Unlock()
+
+	return nil
+}
+
+func (m *addToContainer) Name() string {
+	return "addToContainer"
+}
+
+func TestParallelMutatorWork(t *testing.T) {
+	b := &Bundle{
+		Config: config.Root{},
+	}
+
+	container := []int{}
+	var mu sync.Mutex
+	m1 := &addToContainer{t: t, container: &container, value: 1, mu: &mu}
+	m2 := &addToContainer{t: t, container: &container, value: 2, mu: &mu}
+	m3 := &addToContainer{t: t, container: &container, value: 3, mu: &mu}
+
+	m := Parallel(m1, m2, m3)
+
+	// Apply the mutator
+	diags := ApplyReadOnly(context.Background(), ReadOnly(b), m)
+	require.Empty(t, diags)
+	require.Len(t, container, 3)
+	require.Contains(t, container, 1)
+	require.Contains(t, container, 2)
+	require.Contains(t, container, 3)
+}
+
+func TestParallelMutatorWorkWithErrors(t *testing.T) {
+	b := &Bundle{
+		Config: config.Root{},
+	}
+
+	container := []int{}
+	var mu sync.Mutex
+	m1 := &addToContainer{container: &container, value: 1, mu: &mu}
+	m2 := &addToContainer{container: &container, err: true, value: 2, mu: &mu}
+	m3 := &addToContainer{container: &container, value: 3, mu: &mu}
+
+	m := Parallel(m1, m2, m3)
+
+	// Apply the mutator
+	diags := ApplyReadOnly(context.Background(), ReadOnly(b), m)
+	require.Len(t, diags, 1)
+	require.Equal(t, "error", diags[0].Summary)
+	require.Len(t, container, 2)
+	require.Contains(t, container, 1)
+	require.Contains(t, container, 3)
+}
diff --git a/bundle/permissions/filter.go b/bundle/permissions/filter.go
new file mode 100644
index 000000000..60264f6ea
--- /dev/null
+++ b/bundle/permissions/filter.go
@@ -0,0 +1,88 @@
+package permissions
+
+import (
+	"context"
+
+	"github.com/databricks/cli/bundle"
+	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/dyn"
+)
+
+type filterCurrentUser struct{}
+
+// The Databricks Terraform provider does not allow changing the permissions of
+// the current user. The current user is implied to be the owner of all deployed resources.
+// This mutator removes the current user from the permissions of all resources.
+func FilterCurrentUser() bundle.Mutator {
+	return &filterCurrentUser{}
+}
+
+func (m *filterCurrentUser) Name() string {
+	return "FilterCurrentUserFromPermissions"
+}
+
+func filter(currentUser string) dyn.WalkValueFunc {
+	return func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
+		// Permissions are defined at the top level of a resource. We can skip walking
+		// after a depth of 4.
+		// [resource_type].[resource_name].[permissions].[array_index]
+		// Example: pipelines.foo.permissions.0
+		if len(p) > 4 {
+			return v, dyn.ErrSkip
+		}
+
+		// We can skip walking at a depth of 3 if the key is not "permissions".
+		// Example: pipelines.foo.libraries
+		if len(p) == 3 && p[2] != dyn.Key("permissions") {
+			return v, dyn.ErrSkip
+		}
+
+		// We want to be at the level of an individual permission to check its
+		// user_name and service_principal_name fields.
+ if len(p) != 4 || p[2] != dyn.Key("permissions") { + return v, nil + } + + // Filter if the user_name matches the current user + userName, ok := v.Get("user_name").AsString() + if ok && userName == currentUser { + return v, dyn.ErrDrop + } + + // Filter if the service_principal_name matches the current user + servicePrincipalName, ok := v.Get("service_principal_name").AsString() + if ok && servicePrincipalName == currentUser { + return v, dyn.ErrDrop + } + + return v, nil + + } +} + +func (m *filterCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + currentUser := b.Config.Workspace.CurrentUser.UserName + + err := b.Config.Mutate(func(v dyn.Value) (dyn.Value, error) { + rv, err := dyn.Get(v, "resources") + if err != nil { + // If the resources key is not found, we can skip this mutator. + if dyn.IsNoSuchKeyError(err) { + return v, nil + } + + return dyn.InvalidValue, err + } + + // Walk the resources and filter out the current user from the permissions + nv, err := dyn.Walk(rv, filter(currentUser)) + if err != nil { + return dyn.InvalidValue, err + } + + // Set the resources with the filtered permissions back into the bundle + return dyn.Set(v, "resources", nv) + }) + + return diag.FromErr(err) +} diff --git a/bundle/permissions/filter_test.go b/bundle/permissions/filter_test.go new file mode 100644 index 000000000..121ce10dc --- /dev/null +++ b/bundle/permissions/filter_test.go @@ -0,0 +1,181 @@ +package permissions + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +var alice = resources.Permission{ + Level: CAN_MANAGE, + UserName: "alice@databricks.com", +} + +var bob = resources.Permission{ + Level: CAN_VIEW, + UserName: "bob@databricks.com", +} + +var robot = resources.Permission{ + Level: CAN_RUN, + ServicePrincipalName: "i-Robot", +} + +func testFixture(userName string) *bundle.Bundle { + p := []resources.Permission{ + alice, + bob, + robot, + } + + return &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: userName, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "job1": { + JobSettings: &jobs.JobSettings{ + Name: "job1", + }, + Permissions: p, + }, + "job2": { + JobSettings: &jobs.JobSettings{ + Name: "job2", + }, + Permissions: p, + }, + }, + Pipelines: map[string]*resources.Pipeline{ + "pipeline1": { + Permissions: p, + }, + }, + Experiments: map[string]*resources.MlflowExperiment{ + "experiment1": { + Permissions: p, + }, + }, + Models: map[string]*resources.MlflowModel{ + "model1": { + Permissions: p, + }, + }, + ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{ + "endpoint1": { + Permissions: p, + }, + }, + RegisteredModels: map[string]*resources.RegisteredModel{ + "registered_model1": { + Grants: []resources.Grant{ + { + Principal: "abc", + }, + }, + }, + }, + }, + }, + } + +} + +func TestFilterCurrentUser(t *testing.T) { + b := testFixture("alice@databricks.com") + + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) + + // Assert current user is filtered out. 
+ assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, robot) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. + assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} + +func TestFilterCurrentServicePrincipal(t *testing.T) { + b := testFixture("i-Robot") + + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) + + // Assert current user is filtered out. + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job1"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Jobs["job2"].Permissions)) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Jobs["job2"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Pipelines["pipeline1"].Permissions)) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Pipelines["pipeline1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Experiments["experiment1"].Permissions)) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Experiments["experiment1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.Models["model1"].Permissions)) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.Models["model1"].Permissions, bob) + + assert.Equal(t, 2, len(b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions)) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, alice) + assert.Contains(t, b.Config.Resources.ModelServingEndpoints["endpoint1"].Permissions, bob) + + // Assert there's no change to the grant. 
+ assert.Equal(t, 1, len(b.Config.Resources.RegisteredModels["registered_model1"].Grants)) +} + +func TestFilterCurrentUserDoesNotErrorWhenNoResources(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Workspace: config.Workspace{ + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "abc", + }, + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, FilterCurrentUser()) + assert.NoError(t, diags.Error()) +} diff --git a/bundle/permissions/mutator.go b/bundle/permissions/mutator.go index 54925d1c8..7787bc048 100644 --- a/bundle/permissions/mutator.go +++ b/bundle/permissions/mutator.go @@ -7,6 +7,7 @@ import ( "strings" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" ) const CAN_MANAGE = "CAN_MANAGE" @@ -46,10 +47,10 @@ func ApplyBundlePermissions() bundle.Mutator { return &bundlePermissions{} } -func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *bundlePermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := validate(b) if err != nil { - return err + return diag.FromErr(err) } applyForJobs(ctx, b) diff --git a/bundle/permissions/mutator_test.go b/bundle/permissions/mutator_test.go index 62c0589d3..1a177d902 100644 --- a/bundle/permissions/mutator_test.go +++ b/bundle/permissions/mutator_test.go @@ -7,6 +7,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -23,8 +24,16 @@ func TestApplyBundlePermissions(t *testing.T) { }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job_1": {}, - "job_2": {}, + "job_1": { + JobSettings: &jobs.JobSettings{ + Name: "job_1", + }, + }, + "job_2": { + JobSettings: &jobs.JobSettings{ + Name: "job_2", + }, + }, }, Pipelines: map[string]*resources.Pipeline{ "pipeline_1": {}, @@ -46,8 +55,8 @@ func TestApplyBundlePermissions(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, diags.Error()) require.Len(t, b.Config.Resources.Jobs["job_1"].Permissions, 3) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_MANAGE", UserName: "TestUser"}) @@ -109,11 +118,17 @@ func TestWarningOnOverlapPermission(t *testing.T) { Resources: config.Resources{ Jobs: map[string]*resources.Job{ "job_1": { + JobSettings: &jobs.JobSettings{ + Name: "job_1", + }, Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser"}, }, }, "job_2": { + JobSettings: &jobs.JobSettings{ + Name: "job_2", + }, Permissions: []resources.Permission{ {Level: CAN_VIEW, UserName: "TestUser2"}, }, @@ -123,8 +138,8 @@ func TestWarningOnOverlapPermission(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyBundlePermissions()) + require.NoError(t, diags.Error()) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", UserName: "TestUser"}) require.Contains(t, b.Config.Resources.Jobs["job_1"].Permissions, resources.Permission{Level: "CAN_VIEW", GroupName: "TestGroup"}) diff --git a/bundle/permissions/workspace_root.go b/bundle/permissions/workspace_root.go index a8eb9e278..a59a039f6 100644 --- 
a/bundle/permissions/workspace_root.go +++ b/bundle/permissions/workspace_root.go @@ -5,6 +5,7 @@ import ( "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -16,10 +17,10 @@ func ApplyWorkspaceRootPermissions() bundle.Mutator { } // Apply implements bundle.Mutator. -func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) error { +func (*workspaceRootPermissions) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { err := giveAccessForWorkspaceRoot(ctx, b) if err != nil { - return err + return diag.FromErr(err) } return nil diff --git a/bundle/permissions/workspace_root_test.go b/bundle/permissions/workspace_root_test.go index 6f03204fa..5e23a1da8 100644 --- a/bundle/permissions/workspace_root_test.go +++ b/bundle/permissions/workspace_root_test.go @@ -30,8 +30,8 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { }, Resources: config.Resources{ Jobs: map[string]*resources.Job{ - "job_1": {JobSettings: &jobs.JobSettings{}}, - "job_2": {JobSettings: &jobs.JobSettings{}}, + "job_1": {JobSettings: &jobs.JobSettings{Name: "job_1"}}, + "job_2": {JobSettings: &jobs.JobSettings{Name: "job_2"}}, }, Pipelines: map[string]*resources.Pipeline{ "pipeline_1": {PipelineSpec: &pipelines.PipelineSpec{}}, @@ -69,6 +69,6 @@ func TestApplyWorkspaceRootPermissions(t *testing.T) { WorkspaceObjectType: "directories", }).Return(nil, nil) - err := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, ApplyWorkspaceRootPermissions()) + require.NoError(t, diags.Error()) } diff --git a/bundle/phases/bind.go b/bundle/phases/bind.go new file mode 100644 index 000000000..b2e92d6e2 --- /dev/null +++ b/bundle/phases/bind.go @@ -0,0 +1,45 @@ +package phases + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/lock" + "github.com/databricks/cli/bundle/deploy/terraform" +) + +func Bind(opts *terraform.BindOptions) bundle.Mutator { + return newPhase( + "bind", + []bundle.Mutator{ + lock.Acquire(), + bundle.Defer( + bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + terraform.Import(opts), + terraform.StatePush(), + ), + lock.Release(lock.GoalBind), + ), + }, + ) +} + +func Unbind(resourceType string, resourceKey string) bundle.Mutator { + return newPhase( + "unbind", + []bundle.Mutator{ + lock.Acquire(), + bundle.Defer( + bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + terraform.Unbind(resourceType, resourceKey), + terraform.StatePush(), + ), + lock.Release(lock.GoalUnbind), + ), + }, + ) +} diff --git a/bundle/phases/build.go b/bundle/phases/build.go index 760967fca..362d23be1 100644 --- a/bundle/phases/build.go +++ b/bundle/phases/build.go @@ -4,7 +4,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/scripts" ) @@ -18,8 +18,8 @@ func Build() bundle.Mutator { artifacts.InferMissingProperties(), artifacts.BuildAll(), scripts.Execute(config.ScriptPostBuild), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath("artifacts"), + mutator.ResolveVariableReferences( + "artifacts", ), }, ) diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 
20fe2e413..46c389189 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -5,6 +5,7 @@ import ( "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/deploy" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/metadata" @@ -22,16 +23,20 @@ func Deploy() bundle.Mutator { lock.Acquire(), bundle.Defer( bundle.Seq( + terraform.StatePull(), + deploy.StatePull(), mutator.ValidateGitDetails(), - libraries.MatchWithArtifacts(), + libraries.ValidateLocalLibrariesExist(), artifacts.CleanUp(), artifacts.UploadAll(), python.TransformWheelTask(), files.Upload(), + deploy.StateUpdate(), + deploy.StatePush(), permissions.ApplyWorkspaceRootPermissions(), terraform.Interpolate(), terraform.Write(), - terraform.StatePull(), + terraform.CheckRunningResource(), bundle.Defer( terraform.Apply(), bundle.Seq( diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 216d29210..f1beace84 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -1,22 +1,40 @@ package phases import ( + "context" + "errors" + "net/http" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/deploy/lock" "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/libs/log" + "github.com/databricks/databricks-sdk-go/apierr" ) +func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) { + w := b.WorkspaceClient() + _, err := w.Workspace.GetStatusByPath(ctx, b.Config.Workspace.RootPath) + + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusNotFound { + log.Infof(ctx, "Root path does not exist: %s", b.Config.Workspace.RootPath) + return false, nil + } + + return true, err +} + // The destroy phase deletes artifacts and resources. func Destroy() bundle.Mutator { - destroyMutator := bundle.Seq( lock.Acquire(), bundle.Defer( bundle.Seq( + terraform.StatePull(), terraform.Interpolate(), terraform.Write(), - terraform.StatePull(), terraform.Plan(terraform.PlanGoal("destroy")), terraform.Destroy(), terraform.StatePush(), @@ -29,6 +47,13 @@ func Destroy() bundle.Mutator { return newPhase( "destroy", - []bundle.Mutator{destroyMutator}, + []bundle.Mutator{ + // Only run deploy mutator if root path exists. 
+ bundle.If( + assertRootPathExists, + destroyMutator, + bundle.LogString("No active deployment found to destroy!"), + ), + }, ) } diff --git a/bundle/phases/initialize.go b/bundle/phases/initialize.go index e0558d937..a32de2c56 100644 --- a/bundle/phases/initialize.go +++ b/bundle/phases/initialize.go @@ -3,9 +3,8 @@ package phases import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/variable" + pythonmutator "github.com/databricks/cli/bundle/config/mutator/python" "github.com/databricks/cli/bundle/deploy/metadata" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/permissions" @@ -20,26 +19,43 @@ func Initialize() bundle.Mutator { return newPhase( "initialize", []bundle.Mutator{ + mutator.RewriteSyncPaths(), + mutator.MergeJobClusters(), + mutator.MergeJobTasks(), + mutator.MergePipelineClusters(), mutator.InitializeWorkspaceClient(), mutator.PopulateCurrentUser(), mutator.DefineDefaultWorkspaceRoot(), mutator.ExpandWorkspaceRoot(), mutator.DefineDefaultWorkspacePaths(), mutator.SetVariables(), + // Intentionally placed before ResolveVariableReferencesInLookup, ResolveResourceReferences, + // ResolveVariableReferencesInComplexVariables and ResolveVariableReferences. + // See what is expected in PythonMutatorPhaseInit doc + pythonmutator.PythonMutator(pythonmutator.PythonMutatorPhaseInit), + mutator.ResolveVariableReferencesInLookup(), mutator.ResolveResourceReferences(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "bundle", + "workspace", + "variables", ), mutator.SetRunAs(), mutator.OverrideCompute(), mutator.ProcessTargetMode(), + mutator.DefaultQueueing(), mutator.ExpandPipelineGlobPaths(), + + // Configure use of WSFS for reads if the CLI is running on Databricks. + mutator.ConfigureWSFS(), + mutator.TranslatePaths(), python.WrapperWarning(), permissions.ApplyBundlePermissions(), + permissions.FilterCurrentUser(), metadata.AnnotateJobs(), + metadata.AnnotatePipelines(), terraform.Initialize(), scripts.Execute(config.ScriptPostInit), }, diff --git a/bundle/phases/load.go b/bundle/phases/load.go new file mode 100644 index 000000000..fa0668775 --- /dev/null +++ b/bundle/phases/load.go @@ -0,0 +1,29 @@ +package phases + +import ( + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" +) + +// The load phase loads configuration from disk and performs +// lightweight preprocessing (anything that can be done without network I/O). 
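+// Steps that need a workspace client (for example mutator.PopulateCurrentUser) run later, in the initialize phase.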
+func Load() bundle.Mutator { + return newPhase( + "load", + mutator.DefaultMutators(), + ) +} + +func LoadDefaultTarget() bundle.Mutator { + return newPhase( + "load", + append(mutator.DefaultMutators(), mutator.SelectDefaultTarget()), + ) +} + +func LoadNamedTarget(target string) bundle.Mutator { + return newPhase( + "load", + append(mutator.DefaultMutators(), mutator.SelectTarget(target)), + ) +} diff --git a/bundle/phases/phase.go b/bundle/phases/phase.go index b594e1f62..1bb4f86a2 100644 --- a/bundle/phases/phase.go +++ b/bundle/phases/phase.go @@ -5,6 +5,7 @@ import ( "context" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" ) @@ -26,7 +27,7 @@ func (p *phase) Name() string { return p.name } -func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) error { +func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { log.Infof(ctx, "Phase: %s", p.Name()) return bundle.Apply(ctx, b, bundle.Seq(p.mutators...)) } diff --git a/bundle/python/conditional_transform_test.go b/bundle/python/conditional_transform_test.go index 5bf337216..677970d70 100644 --- a/bundle/python/conditional_transform_test.go +++ b/bundle/python/conditional_transform_test.go @@ -18,8 +18,8 @@ func TestNoTransformByDefault(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -47,8 +47,8 @@ func TestNoTransformByDefault(t *testing.T) { } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) task := b.Config.Resources.Jobs["job1"].Tasks[0] require.NotNil(t, task.PythonWheelTask) @@ -63,8 +63,8 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -81,6 +81,7 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { }, Libraries: []compute.Library{ {Whl: "/Workspace/Users/test@test.com/bundle/dist/test.whl"}, + {Jar: "/Workspace/Users/test@test.com/bundle/dist/test.jar"}, }, }, }, @@ -95,8 +96,8 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) task := b.Config.Resources.Jobs["job1"].Tasks[0] require.Nil(t, task.PythonWheelTask) @@ -105,10 +106,11 @@ func TestTransformWithExperimentalSettingSetToTrue(t *testing.T) { dir, err := b.InternalDir(context.Background()) require.NoError(t, err) - internalDirRel, err := filepath.Rel(b.Config.Path, dir) + internalDirRel, err := filepath.Rel(b.RootPath, dir) require.NoError(t, err) require.Equal(t, path.Join(filepath.ToSlash(internalDirRel), "notebook_job1_key1"), task.NotebookTask.NotebookPath) - require.Empty(t, task.Libraries) + require.Len(t, task.Libraries, 1) + require.Equal(t, "/Workspace/Users/test@test.com/bundle/dist/test.jar", task.Libraries[0].Jar) } diff --git a/bundle/python/transform.go b/bundle/python/transform.go index a3fea2e87..9d3b1ab6a 100644 --- a/bundle/python/transform.go +++ b/bundle/python/transform.go @@ -1,6 +1,7 @@ package python import ( + "context" "fmt" "strconv" "strings" @@ -8,6 
+9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/jobs" ) @@ -62,9 +64,10 @@ dbutils.notebook.exit(s) // which installs uploaded wheels using %pip and then calling corresponding // entry point. func TransformWheelTask() bundle.Mutator { - return mutator.If( - func(b *bundle.Bundle) bool { - return b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper + return bundle.If( + func(_ context.Context, b *bundle.Bundle) (bool, error) { + res := b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper + return res, nil }, mutator.NewTrampoline( "python_wheel", @@ -79,7 +82,14 @@ type pythonTrampoline struct{} func (t *pythonTrampoline) CleanUp(task *jobs.Task) error { task.PythonWheelTask = nil - task.Libraries = nil + + nonWheelLibraries := make([]compute.Library, 0) + for _, l := range task.Libraries { + if l.Whl == "" { + nonWheelLibraries = append(nonWheelLibraries, l) + } + } + task.Libraries = nonWheelLibraries return nil } @@ -96,7 +106,7 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { // At this point of moment we don't have local paths in Libraries sections anymore // Local paths have been replaced with the remote when the artifacts where uploaded // in artifacts.UploadAll mutator. - if task.PythonWheelTask == nil || !needsTrampoline(task) { + if task.PythonWheelTask == nil || !needsTrampoline(*task) { continue } @@ -109,18 +119,25 @@ func (t *pythonTrampoline) GetTasks(b *bundle.Bundle) []mutator.TaskWithJobKey { return result } -func needsTrampoline(task *jobs.Task) bool { +func needsTrampoline(task jobs.Task) bool { return libraries.IsTaskWithWorkspaceLibraries(task) } func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, error) { params, err := t.generateParameters(task.PythonWheelTask) + whlLibraries := make([]compute.Library, 0) + for _, l := range task.Libraries { + if l.Whl != "" { + whlLibraries = append(whlLibraries, l) + } + } + if err != nil { return nil, err } data := map[string]any{ - "Libraries": task.Libraries, + "Libraries": whlLibraries, "Params": params, "Task": task.PythonWheelTask, } diff --git a/bundle/python/transform_test.go b/bundle/python/transform_test.go index b6427ccd8..c15feb424 100644 --- a/bundle/python/transform_test.go +++ b/bundle/python/transform_test.go @@ -116,8 +116,8 @@ func TestTransformFiltersWheelTasksOnly(t *testing.T) { func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { tmpDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tmpDir, Config: config.Root{ - Path: tmpDir, Bundle: config.Bundle{ Target: "development", }, @@ -140,6 +140,6 @@ func TestNoPanicWithNoPythonWheelTasks(t *testing.T) { }, } trampoline := TransformWheelTask() - err := bundle.Apply(context.Background(), b, trampoline) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, trampoline) + require.NoError(t, diags.Error()) } diff --git a/bundle/python/warning.go b/bundle/python/warning.go index 9b9fd8e59..3da88b0d7 100644 --- a/bundle/python/warning.go +++ b/bundle/python/warning.go @@ -2,11 +2,11 @@ package python import ( "context" - "fmt" "strings" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" 
"golang.org/x/mod/semver" @@ -19,13 +19,13 @@ func WrapperWarning() bundle.Mutator { return &wrapperWarning{} } -func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) error { +func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { if isPythonWheelWrapperOn(b) { return nil } if hasIncompatibleWheelTasks(ctx, b) { - return fmt.Errorf("python wheel tasks with local libraries require compute with DBR 13.1+. Please change your cluster configuration or set experimental 'python_wheel_wrapper' setting to 'true'") + return diag.Errorf("Python wheel tasks require compute with DBR 13.3+ to include local libraries. Please change your cluster configuration or use the experimental 'python_wheel_wrapper' setting. See https://docs.databricks.com/dev-tools/bundles/python-wheel.html for more information.") } return nil } @@ -46,7 +46,7 @@ func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool { if task.JobClusterKey != "" { for _, job := range b.Config.Resources.Jobs { for _, cluster := range job.JobClusters { - if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster != nil { + if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" { if lowerThanExpectedVersion(ctx, cluster.NewCluster.SparkVersion) { return true } diff --git a/bundle/python/warning_test.go b/bundle/python/warning_test.go index c8dde59ec..dd6397f78 100644 --- a/bundle/python/warning_test.go +++ b/bundle/python/warning_test.go @@ -63,13 +63,13 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "cluster1", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "12.2.x-scala2.12", }, }, { JobClusterKey: "cluster2", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.1.x-scala2.12", }, }, @@ -101,8 +101,8 @@ func TestIncompatibleWheelTasksWithJobClusterKey(t *testing.T) { require.True(t, hasIncompatibleWheelTasks(context.Background(), b)) - err := bundle.Apply(context.Background(), b, WrapperWarning()) - require.ErrorContains(t, err, "python wheel tasks with local libraries require compute with DBR 13.1+.") + diags := bundle.Apply(context.Background(), b, WrapperWarning()) + require.ErrorContains(t, diags.Error(), "require compute with DBR 13.3") } func TestIncompatibleWheelTasksWithExistingClusterId(t *testing.T) { @@ -157,13 +157,13 @@ func TestNoIncompatibleWheelTasks(t *testing.T) { JobClusters: []jobs.JobCluster{ { JobClusterKey: "cluster1", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "12.2.x-scala2.12", }, }, { JobClusterKey: "cluster2", - NewCluster: &compute.ClusterSpec{ + NewCluster: compute.ClusterSpec{ SparkVersion: "13.1.x-scala2.12", }, }, @@ -280,8 +280,8 @@ func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) { }, } - err := bundle.Apply(context.Background(), b, WrapperWarning()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, WrapperWarning()) + require.NoError(t, diags.Error()) } func TestSparkVersionLowerThanExpected(t *testing.T) { diff --git a/bundle/render/render_text_output.go b/bundle/render/render_text_output.go new file mode 100644 index 000000000..439ae6132 --- /dev/null +++ b/bundle/render/render_text_output.go @@ -0,0 +1,185 @@ +package render + +import ( + "fmt" + "io" + "path/filepath" + "strings" + "text/template" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/diag" + 
"github.com/databricks/databricks-sdk-go/service/iam" + "github.com/fatih/color" +) + +var renderFuncMap = template.FuncMap{ + "red": color.RedString, + "green": color.GreenString, + "blue": color.BlueString, + "yellow": color.YellowString, + "magenta": color.MagentaString, + "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) + }, +} + +const errorTemplate = `{{ "Error" | red }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const warningTemplate = `{{ "Warning" | yellow }}: {{ .Summary }} +{{- if .Path.String }} + {{ "at " }}{{ .Path.String | green }} +{{- end }} +{{- if .Location.File }} + {{ "in " }}{{ .Location.String | cyan }} +{{- end }} +{{- if .Detail }} + +{{ .Detail }} +{{- end }} + +` + +const summaryTemplate = `{{- if .Name -}} +Name: {{ .Name | bold }} +{{- if .Target }} +Target: {{ .Target | bold }} +{{- end }} +{{- if or .User .Host .Path }} +Workspace: +{{- if .Host }} + Host: {{ .Host | bold }} +{{- end }} +{{- if .User }} + User: {{ .User | bold }} +{{- end }} +{{- if .Path }} + Path: {{ .Path | bold }} +{{- end }} +{{- end }} + +{{ end -}} + +{{ .Trailer }} +` + +func pluralize(n int, singular, plural string) string { + if n == 1 { + return fmt.Sprintf("%d %s", n, singular) + } + return fmt.Sprintf("%d %s", n, plural) +} + +func buildTrailer(diags diag.Diagnostics) string { + parts := []string{} + if errors := len(diags.Filter(diag.Error)); errors > 0 { + parts = append(parts, color.RedString(pluralize(errors, "error", "errors"))) + } + if warnings := len(diags.Filter(diag.Warning)); warnings > 0 { + parts = append(parts, color.YellowString(pluralize(warnings, "warning", "warnings"))) + } + if len(parts) > 0 { + return fmt.Sprintf("Found %s", strings.Join(parts, " and ")) + } else { + return color.GreenString("Validation OK!") + } +} + +func renderSummaryTemplate(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + if b == nil { + return renderSummaryTemplate(out, &bundle.Bundle{}, diags) + } + + var currentUser = &iam.User{} + + if b.Config.Workspace.CurrentUser != nil { + if b.Config.Workspace.CurrentUser.User != nil { + currentUser = b.Config.Workspace.CurrentUser.User + } + } + + t := template.Must(template.New("summary").Funcs(renderFuncMap).Parse(summaryTemplate)) + err := t.Execute(out, map[string]any{ + "Name": b.Config.Bundle.Name, + "Target": b.Config.Bundle.Target, + "User": currentUser.UserName, + "Path": b.Config.Workspace.RootPath, + "Host": b.Config.Workspace.Host, + "Trailer": buildTrailer(diags), + }) + + return err +} + +func renderDiagnostics(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics) error { + errorT := template.Must(template.New("error").Funcs(renderFuncMap).Parse(errorTemplate)) + warningT := template.Must(template.New("warning").Funcs(renderFuncMap).Parse(warningTemplate)) + + // Print errors and warnings. 
+ for _, d := range diags { + var t *template.Template + switch d.Severity { + case diag.Error: + t = errorT + case diag.Warning: + t = warningT + } + + // Make file relative to bundle root + if d.Location.File != "" && b != nil { + out, err := filepath.Rel(b.RootPath, d.Location.File) + // if we can't relativize the path, just use path as-is + if err == nil { + d.Location.File = out + } + } + + // Render the diagnostic with the appropriate template. + err := t.Execute(out, d) + if err != nil { + return fmt.Errorf("failed to render template: %w", err) + } + } + + return nil +} + +// RenderOptions contains options for rendering diagnostics. +type RenderOptions struct { + // variable to include leading new line + + RenderSummaryTable bool +} + +// RenderTextOutput renders the diagnostics in a human-readable format. +func RenderTextOutput(out io.Writer, b *bundle.Bundle, diags diag.Diagnostics, opts RenderOptions) error { + err := renderDiagnostics(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render diagnostics: %w", err) + } + + if opts.RenderSummaryTable { + err = renderSummaryTemplate(out, b, diags) + if err != nil { + return fmt.Errorf("failed to render summary: %w", err) + } + } + + return nil +} diff --git a/bundle/render/render_text_output_test.go b/bundle/render/render_text_output_test.go new file mode 100644 index 000000000..b7aec8864 --- /dev/null +++ b/bundle/render/render_text_output_test.go @@ -0,0 +1,302 @@ +package render + +import ( + "bytes" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/require" +) + +type renderTestOutputTestCase struct { + name string + bundle *bundle.Bundle + diags diag.Diagnostics + opts RenderOptions + expected string +} + +func TestRenderTextOutput(t *testing.T) { + loadingBundle := &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + }, + } + + testCases := []renderTestOutputTestCase{ + { + name: "nil bundle and 1 error", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Error: failed to load xxx\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 error", + bundle: loadingBundle, + diags: diag.Errorf("failed to load bundle"), + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Error: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 error\n", + }, + { + name: "bundle during 'load' and 1 warning", + bundle: loadingBundle, + diags: diag.Warningf("failed to load bundle"), + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Warning: failed to load bundle\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 1 warning\n", + }, + { + name: "bundle during 'load' and 2 warnings", + bundle: loadingBundle, + diags: diag.Warningf("warning (1)").Extend(diag.Warningf("warning (2)")), + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Warning: warning (1)\n" + + "\n" + + "Warning: warning (2)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 warnings\n", + }, + { + name: "bundle during 'load' and 2 errors, 1 warning with 
details", + bundle: loadingBundle, + diags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (1)", + Detail: "detail (1)", + Location: dyn.Location{ + File: "foo.py", + Line: 1, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (2)", + Detail: "detail (2)", + Location: dyn.Location{ + File: "foo.py", + Line: 2, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "warning (3)", + Detail: "detail (3)", + Location: dyn.Location{ + File: "foo.py", + Line: 3, + Column: 1, + }, + }, + }, + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Error: error (1)\n" + + " in foo.py:1:1\n" + + "\n" + + "detail (1)\n" + + "\n" + + "Error: error (2)\n" + + " in foo.py:2:1\n" + + "\n" + + "detail (2)\n" + + "\n" + + "Warning: warning (3)\n" + + " in foo.py:3:1\n" + + "\n" + + "detail (3)\n" + + "\n" + + "Name: test-bundle\n" + + "Target: test-target\n" + + "\n" + + "Found 2 errors and 1 warning\n", + }, + { + name: "bundle during 'init'", + bundle: &bundle.Bundle{ + Config: config.Root{ + Bundle: config.Bundle{ + Name: "test-bundle", + Target: "test-target", + }, + Workspace: config.Workspace{ + Host: "https://localhost/", + CurrentUser: &config.User{ + User: &iam.User{ + UserName: "test-user", + }, + }, + RootPath: "/Users/test-user@databricks.com/.bundle/examples/test-target", + }, + }, + }, + diags: nil, + opts: RenderOptions{RenderSummaryTable: true}, + expected: "Name: test-bundle\n" + + "Target: test-target\n" + + "Workspace:\n" + + " Host: https://localhost/\n" + + " User: test-user\n" + + " Path: /Users/test-user@databricks.com/.bundle/examples/test-target\n" + + "\n" + + "Validation OK!\n", + }, + { + name: "nil bundle without summary with 1 error and 1 warning", + bundle: nil, + diags: diag.Diagnostics{ + diag.Diagnostic{ + Severity: diag.Error, + Summary: "error (1)", + Detail: "detail (1)", + Location: dyn.Location{ + File: "foo.py", + Line: 1, + Column: 1, + }, + }, + diag.Diagnostic{ + Severity: diag.Warning, + Summary: "warning (2)", + Detail: "detail (2)", + Location: dyn.Location{ + File: "foo.py", + Line: 3, + Column: 1, + }, + }, + }, + opts: RenderOptions{RenderSummaryTable: false}, + expected: "Error: error (1)\n" + + " in foo.py:1:1\n" + + "\n" + + "detail (1)\n" + + "\n" + + "Warning: warning (2)\n" + + " in foo.py:3:1\n" + + "\n" + + "detail (2)\n" + + "\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := RenderTextOutput(writer, tc.bundle, tc.diags, tc.opts) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +type renderDiagnosticsTestCase struct { + name string + diags diag.Diagnostics + expected string +} + +func TestRenderDiagnostics(t *testing.T) { + bundle := &bundle.Bundle{} + + testCases := []renderDiagnosticsTestCase{ + { + name: "empty diagnostics", + diags: diag.Diagnostics{}, + expected: "", + }, + { + name: "error with short summary", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + }, + }, + expected: "Error: failed to load xxx\n\n", + }, + { + name: "error with source location", + diags: diag.Diagnostics{ + { + Severity: diag.Error, + Summary: "failed to load xxx", + Detail: "'name' is required", + Location: dyn.Location{ + File: "foo.yaml", + Line: 1, + Column: 2, + }, + }, + }, + expected: "Error: failed to load xxx\n" + + " in foo.yaml:1:2\n\n" + + "'name' is required\n\n", + }, + { + name: "error with path", + 
diags: diag.Diagnostics{ + { + Severity: diag.Error, + Detail: "'name' is required", + Summary: "failed to load xxx", + Path: dyn.MustPathFromString("resources.jobs.xxx"), + }, + }, + expected: "Error: failed to load xxx\n" + + " at resources.jobs.xxx\n" + + "\n" + + "'name' is required\n\n", + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderDiagnostics(writer, bundle, tc.diags) + require.NoError(t, err) + + assert.Equal(t, tc.expected, writer.String()) + }) + } +} + +func TestRenderSummaryTemplate_nilBundle(t *testing.T) { + writer := &bytes.Buffer{} + + err := renderSummaryTemplate(writer, nil, nil) + require.NoError(t, err) + + assert.Equal(t, "Validation OK!\n", writer.String()) +} diff --git a/bundle/root.go b/bundle/root.go index 7518bf5fc..efc21e0ca 100644 --- a/bundle/root.go +++ b/bundle/root.go @@ -7,7 +7,7 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/folders" ) // getRootEnv returns the value of the bundle root environment variable diff --git a/bundle/root_test.go b/bundle/root_test.go index 88113546c..99bf58a00 100644 --- a/bundle/root_test.go +++ b/bundle/root_test.go @@ -8,30 +8,10 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/env" - "github.com/stretchr/testify/assert" + "github.com/databricks/cli/internal/testutil" "github.com/stretchr/testify/require" ) -// Changes into specified directory for the duration of the test. -// Returns the current working directory. -func chdir(t *testing.T, dir string) string { - wd, err := os.Getwd() - require.NoError(t, err) - - abs, err := filepath.Abs(dir) - require.NoError(t, err) - - err = os.Chdir(abs) - require.NoError(t, err) - - t.Cleanup(func() { - err := os.Chdir(wd) - require.NoError(t, err) - }) - - return wd -} - func TestRootFromEnv(t *testing.T) { ctx := context.Background() dir := t.TempDir() @@ -83,7 +63,7 @@ func TestRootLookup(t *testing.T) { t.Setenv(env.RootVariable, "") os.Unsetenv(env.RootVariable) - chdir(t, t.TempDir()) + testutil.Chdir(t, t.TempDir()) // Create databricks.yml file. f, err := os.Create(config.FileNames[0]) @@ -95,7 +75,7 @@ func TestRootLookup(t *testing.T) { require.NoError(t, err) // It should find the project root from $PWD. - wd := chdir(t, "./a/b/c") + wd := testutil.Chdir(t, "./a/b/c") root, err := mustGetRoot(ctx) require.NoError(t, err) require.Equal(t, wd, root) @@ -109,53 +89,7 @@ func TestRootLookupError(t *testing.T) { os.Unsetenv(env.RootVariable) // It can't find a project root from a temporary directory. 
- _ = chdir(t, t.TempDir()) + _ = testutil.Chdir(t, t.TempDir()) _, err := mustGetRoot(ctx) require.ErrorContains(t, err, "unable to locate bundle root") } - -func TestLoadYamlWhenIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - chdir(t, filepath.Join(".", "tests", "basic")) - t.Setenv(env.IncludesVariable, "test") - - bundle, err := MustLoad(ctx) - assert.NoError(t, err) - assert.Equal(t, "basic", bundle.Config.Bundle.Name) - - cwd, err := os.Getwd() - assert.NoError(t, err) - assert.Equal(t, cwd, bundle.Config.Path) -} - -func TestLoadDefautlBundleWhenNoYamlAndRootAndIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - chdir(t, dir) - t.Setenv(env.RootVariable, dir) - t.Setenv(env.IncludesVariable, "test") - - bundle, err := MustLoad(ctx) - assert.NoError(t, err) - assert.Equal(t, dir, bundle.Config.Path) -} - -func TestErrorIfNoYamlNoRootEnvAndIncludesEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - chdir(t, dir) - t.Setenv(env.IncludesVariable, "test") - - _, err := MustLoad(ctx) - assert.Error(t, err) -} - -func TestErrorIfNoYamlNoIncludesEnvAndRootEnvPresent(t *testing.T) { - ctx := context.Background() - dir := t.TempDir() - chdir(t, dir) - t.Setenv(env.RootVariable, dir) - - _, err := MustLoad(ctx) - assert.Error(t, err) -} diff --git a/bundle/run/args.go b/bundle/run/args.go new file mode 100644 index 000000000..2885cda01 --- /dev/null +++ b/bundle/run/args.go @@ -0,0 +1,127 @@ +package run + +import ( + "fmt" + "strings" + + "github.com/spf13/cobra" +) + +// argsHandler defines the (unexported) interface for the runners in this +// package to implement to handle context-specific positional arguments. +// +// For jobs, this means: +// - If a job uses job parameters: parse positional arguments into key-value pairs +// and pass them as job parameters. +// - If a job does not use job parameters AND only has Spark Python tasks: +// pass through the positional arguments as a list of Python parameters. +// - If a job does not use job parameters AND only has notebook tasks: +// parse arguments into key-value pairs and pass them as notebook parameters. +// - ... +// +// In all cases, we may be able to provide context-aware argument completions. +type argsHandler interface { + // Parse additional positional arguments. + ParseArgs(args []string, opts *Options) error + + // Complete additional positional arguments. + CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) +} + +// nopArgsHandler is a no-op implementation of [argsHandler]. +// It returns an error if any positional arguments are present and doesn't complete anything. +type nopArgsHandler struct{} + +func (nopArgsHandler) ParseArgs(args []string, opts *Options) error { + if len(args) == 0 { + return nil + } + + return fmt.Errorf("received %d unexpected positional arguments", len(args)) +} + +func (nopArgsHandler) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +// argsToKeyValueMap parses key-value pairs from the specified arguments. +// +// It accepts these formats: +// - `--key=value` +// - `--key`, `value` +// +// Remaining arguments are returned as-is. +func argsToKeyValueMap(args []string) (map[string]string, []string) { + kv := make(map[string]string) + key := "" + tail := args + + for i, arg := range args { + // If key is set; use the next argument as value. 
+ if key != "" { + kv[key] = arg + key = "" + tail = args[i+1:] + continue + } + + if strings.HasPrefix(arg, "--") { + parts := strings.SplitN(arg[2:], "=", 2) + if len(parts) == 2 { + kv[parts[0]] = parts[1] + tail = args[i+1:] + continue + } + + // Use this argument as key, the next as value. + key = parts[0] + continue + } + + // If we cannot interpret it; return here. + break + } + + return kv, tail +} + +// genericParseKeyValueArgs parses key-value pairs from the specified arguments. +// If there are any positional arguments left, it returns an error. +func genericParseKeyValueArgs(args []string) (map[string]string, error) { + kv, args := argsToKeyValueMap(args) + if len(args) > 0 { + return nil, fmt.Errorf("received %d unexpected positional arguments", len(args)) + } + + return kv, nil +} + +// genericCompleteKeyValueArgs completes key-value pairs from the specified arguments. +// Completion options that are already specified are skipped. +func genericCompleteKeyValueArgs(args []string, toComplete string, options []string) ([]string, cobra.ShellCompDirective) { + // If the string to complete contains an equals sign, then we are + // completing the value part (which we don't know here). + if strings.Contains(toComplete, "=") { + return nil, cobra.ShellCompDirectiveNoFileComp + } + + // Remove already completed key/value pairs. + kv, args := argsToKeyValueMap(args) + + // If the list of remaining args is empty, return possible completions. + if len(args) == 0 { + var completions []string + for _, option := range options { + // Skip options that have already been specified. + if _, ok := kv[option]; ok { + continue + } + completions = append(completions, fmt.Sprintf("--%s=", option)) + } + // Note: we include cobra.ShellCompDirectiveNoSpace to suggest including + // the value part right after the equals sign. + return completions, cobra.ShellCompDirectiveNoFileComp | cobra.ShellCompDirectiveNoSpace + } + + return nil, cobra.ShellCompDirectiveNoFileComp +} diff --git a/bundle/run/args_test.go b/bundle/run/args_test.go new file mode 100644 index 000000000..aff14b481 --- /dev/null +++ b/bundle/run/args_test.go @@ -0,0 +1,134 @@ +package run + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestNopArgsHandler(t *testing.T) { + h := nopArgsHandler{} + opts := &Options{} + + // No error if no positional arguments are passed. + err := h.ParseArgs([]string{}, opts) + assert.NoError(t, err) + + // Error if any positional arguments are passed. + err = h.ParseArgs([]string{"foo"}, opts) + assert.EqualError(t, err, "received 1 unexpected positional arguments") + + // No completions. 
+ completions, _ := h.CompleteArgs([]string{}, "") + assert.Nil(t, completions) +} + +func TestArgsToKeyValueMap(t *testing.T) { + for _, tc := range []struct { + input []string + expected map[string]string + tail []string + err error + }{ + { + input: []string{}, + expected: map[string]string{}, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz", "qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz", "qux", "tail"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{"tail"}, + }, + { + input: []string{"--foo=bar", "--baz", "qux", "tail", "--foo=bar"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{"tail", "--foo=bar"}, + }, + { + input: []string{"--foo=bar", "--baz=qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz=--qux"}, + expected: map[string]string{ + "foo": "bar", + "baz": "--qux", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz="}, + expected: map[string]string{ + "foo": "bar", + "baz": "", + }, + tail: []string{}, + }, + { + input: []string{"--foo=bar", "--baz"}, + expected: map[string]string{ + "foo": "bar", + }, + tail: []string{"--baz"}, + }, + } { + actual, tail := argsToKeyValueMap(tc.input) + assert.Equal(t, tc.expected, actual) + assert.Equal(t, tc.tail, tail) + } +} + +func TestGenericParseKeyValueArgs(t *testing.T) { + kv, err := genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux"}) + assert.NoError(t, err) + assert.Equal(t, map[string]string{ + "foo": "bar", + "baz": "qux", + }, kv) + + _, err = genericParseKeyValueArgs([]string{"--foo=bar", "--baz", "qux", "tail"}) + assert.EqualError(t, err, "received 1 unexpected positional arguments") +} + +func TestGenericCompleteKeyValueArgs(t *testing.T) { + var completions []string + + // Complete nothing if there are no options. + completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{}) + assert.Empty(t, completions) + + // Complete nothing if we're in the middle of a key-value pair (as single argument with equals sign). + completions, _ = genericCompleteKeyValueArgs([]string{}, `--foo=`, []string{`foo`, `bar`}) + assert.Empty(t, completions) + + // Complete nothing if we're in the middle of a key-value pair (as two arguments). + completions, _ = genericCompleteKeyValueArgs([]string{`--foo`}, ``, []string{`foo`, `bar`}) + assert.Empty(t, completions) + + // Complete if we're at the beginning. + completions, _ = genericCompleteKeyValueArgs([]string{}, ``, []string{`foo`, `bar`}) + assert.Equal(t, []string{`--foo=`, `--bar=`}, completions) + + // Complete if we have already one key-value pair. + completions, _ = genericCompleteKeyValueArgs([]string{`--foo=bar`}, ``, []string{`foo`, `bar`}) + assert.Equal(t, []string{`--bar=`}, completions) +} diff --git a/bundle/run/job.go b/bundle/run/job.go index a54279c11..8003c7d29 100644 --- a/bundle/run/job.go +++ b/bundle/run/job.go @@ -15,6 +15,8 @@ import ( "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/fatih/color" + "github.com/spf13/cobra" + "golang.org/x/sync/errgroup" ) // Default timeout for waiting for a job run to complete. 
@@ -275,3 +277,50 @@ func (r *jobRunner) convertPythonParams(opts *Options) error { return nil } + +func (r *jobRunner) Cancel(ctx context.Context) error { + w := r.bundle.WorkspaceClient() + jobID, err := strconv.ParseInt(r.job.ID, 10, 64) + if err != nil { + return fmt.Errorf("job ID is not an integer: %s", r.job.ID) + } + + runs, err := w.Jobs.ListRunsAll(ctx, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: jobID, + }) + + if err != nil { + return err + } + + if len(runs) == 0 { + return nil + } + + errGroup, errCtx := errgroup.WithContext(ctx) + for _, run := range runs { + runId := run.RunId + errGroup.Go(func() error { + wait, err := w.Jobs.CancelRun(errCtx, jobs.CancelRun{ + RunId: runId, + }) + if err != nil { + return err + } + // Waits for the Terminated or Skipped state + _, err = wait.GetWithTimeout(jobRunTimeout) + return err + }) + } + + return errGroup.Wait() +} + +func (r *jobRunner) ParseArgs(args []string, opts *Options) error { + return r.posArgsHandler().ParseArgs(args, opts) +} + +func (r *jobRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return r.posArgsHandler().CompleteArgs(args, toComplete) +} diff --git a/bundle/run/job_args.go b/bundle/run/job_args.go new file mode 100644 index 000000000..85cf96efb --- /dev/null +++ b/bundle/run/job_args.go @@ -0,0 +1,184 @@ +package run + +import ( + "github.com/databricks/cli/bundle/config/resources" + "github.com/spf13/cobra" + "golang.org/x/exp/maps" +) + +type jobParameterArgs struct { + *resources.Job +} + +func (a jobParameterArgs) ParseArgs(args []string, opts *Options) error { + kv, err := genericParseKeyValueArgs(args) + if err != nil { + return err + } + + // Merge the key-value pairs from the args into the options struct. + if opts.Job.jobParams == nil { + opts.Job.jobParams = kv + } else { + for k, v := range kv { + opts.Job.jobParams[k] = v + } + } + return nil +} + +func (a jobParameterArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + var completions []string + for _, param := range a.Parameters { + completions = append(completions, param.Name) + } + return genericCompleteKeyValueArgs(args, toComplete, completions) +} + +type jobTaskNotebookParamArgs struct { + *resources.Job +} + +func (a jobTaskNotebookParamArgs) ParseArgs(args []string, opts *Options) error { + kv, err := genericParseKeyValueArgs(args) + if err != nil { + return err + } + + // Merge the key-value pairs from the args into the options struct. + if opts.Job.notebookParams == nil { + opts.Job.notebookParams = kv + } else { + for k, v := range kv { + opts.Job.notebookParams[k] = v + } + } + return nil +} + +func (a jobTaskNotebookParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + parameters := make(map[string]string) + for _, t := range a.Tasks { + if nt := t.NotebookTask; nt != nil { + maps.Copy(parameters, nt.BaseParameters) + } + } + return genericCompleteKeyValueArgs(args, toComplete, maps.Keys(parameters)) +} + +type jobTaskJarParamArgs struct { + *resources.Job +} + +func (a jobTaskJarParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.jarParams = append(opts.Job.jarParams, args...) 
+ return nil +} + +func (a jobTaskJarParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskPythonParamArgs struct { + *resources.Job +} + +func (a jobTaskPythonParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.pythonParams = append(opts.Job.pythonParams, args...) + return nil +} + +func (a jobTaskPythonParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskSparkSubmitParamArgs struct { + *resources.Job +} + +func (a jobTaskSparkSubmitParamArgs) ParseArgs(args []string, opts *Options) error { + opts.Job.sparkSubmitParams = append(opts.Job.sparkSubmitParams, args...) + return nil +} + +func (a jobTaskSparkSubmitParamArgs) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} + +type jobTaskType int + +const ( + jobTaskTypeNotebook jobTaskType = iota + 1 + jobTaskTypeSparkJar + jobTaskTypeSparkPython + jobTaskTypeSparkSubmit + jobTaskTypePipeline + jobTaskTypePythonWheel + jobTaskTypeSql + jobTaskTypeDbt + jobTaskTypeRunJob +) + +func (r *jobRunner) posArgsHandler() argsHandler { + job := r.job + if job == nil || job.JobSettings == nil { + return nopArgsHandler{} + } + + // Handle job parameters, if any are defined. + if len(job.Parameters) > 0 { + return &jobParameterArgs{job} + } + + // Handle task parameters otherwise. + var seen = make(map[jobTaskType]bool) + for _, t := range job.Tasks { + if t.NotebookTask != nil { + seen[jobTaskTypeNotebook] = true + } + if t.SparkJarTask != nil { + seen[jobTaskTypeSparkJar] = true + } + if t.SparkPythonTask != nil { + seen[jobTaskTypeSparkPython] = true + } + if t.SparkSubmitTask != nil { + seen[jobTaskTypeSparkSubmit] = true + } + if t.PipelineTask != nil { + seen[jobTaskTypePipeline] = true + } + if t.PythonWheelTask != nil { + seen[jobTaskTypePythonWheel] = true + } + if t.SqlTask != nil { + seen[jobTaskTypeSql] = true + } + if t.DbtTask != nil { + seen[jobTaskTypeDbt] = true + } + if t.RunJobTask != nil { + seen[jobTaskTypeRunJob] = true + } + } + + // Cannot handle positional arguments if we have more than one task type. + keys := maps.Keys(seen) + if len(keys) != 1 { + return nopArgsHandler{} + } + + switch keys[0] { + case jobTaskTypeNotebook: + return jobTaskNotebookParamArgs{job} + case jobTaskTypeSparkJar: + return jobTaskJarParamArgs{job} + case jobTaskTypeSparkPython, jobTaskTypePythonWheel: + return jobTaskPythonParamArgs{job} + case jobTaskTypeSparkSubmit: + return jobTaskSparkSubmitParamArgs{job} + default: + // No positional argument handling for other task types. 
+ return nopArgsHandler{} + } +} diff --git a/bundle/run/job_args_test.go b/bundle/run/job_args_test.go new file mode 100644 index 000000000..709994907 --- /dev/null +++ b/bundle/run/job_args_test.go @@ -0,0 +1,223 @@ +package run + +import ( + "testing" + + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/assert" +) + +func TestJobParameterArgs(t *testing.T) { + a := jobParameterArgs{ + &resources.Job{ + JobSettings: &jobs.JobSettings{ + Parameters: []jobs.JobParameterDefinition{ + { + Name: "foo", + Default: "value", + }, + { + Name: "bar", + Default: "value", + }, + }, + }, + }, + } + + t.Run("ParseArgsError", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts) + assert.ErrorContains(t, err, "unexpected positional arguments") + }) + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.jobParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.jobParams = map[string]string{"p1": "v1"} + err := a.ParseArgs([]string{"--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.jobParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Equal(t, []string{"--foo=", "--bar="}, completions) + }) +} + +func TestJobTaskNotebookParamArgs(t *testing.T) { + a := jobTaskNotebookParamArgs{ + &resources.Job{ + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + NotebookTask: &jobs.NotebookTask{ + BaseParameters: map[string]string{ + "foo": "value", + "bar": "value", + }, + }, + }, + }, + }, + }, + } + + t.Run("ParseArgsError", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "superfluous"}, &opts) + assert.ErrorContains(t, err, "unexpected positional arguments") + }) + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"--p1=v1", "--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.notebookParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.notebookParams = map[string]string{"p1": "v1"} + err := a.ParseArgs([]string{"--p2=v2"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + map[string]string{ + "p1": "v1", + "p2": "v2", + }, + opts.Job.notebookParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.ElementsMatch(t, []string{"--foo=", "--bar="}, completions) + }) +} + +func TestJobTaskJarParamArgs(t *testing.T) { + a := jobTaskJarParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.jarParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.jarParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.jarParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} 
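[Editorial aside, not part of job_args_test.go] The tests above exercise each handler directly. The sketch below (a hypothetical test, reusing the imports already present in this file) shows the dispatch end to end for a job that only defines notebook tasks: posArgsHandler selects jobTaskNotebookParamArgs, so positional --key=value arguments land in opts.Job.notebookParams.

func TestNotebookOnlyJobPositionalArgsSketch(t *testing.T) {
	r := &jobRunner{
		job: &resources.Job{
			JobSettings: &jobs.JobSettings{
				Tasks: []jobs.Task{
					{NotebookTask: &jobs.NotebookTask{
						BaseParameters: map[string]string{"foo": "value"},
					}},
				},
			},
		},
	}

	// posArgsHandler sees a single task type (notebook) and returns
	// jobTaskNotebookParamArgs, so key-value args become notebook params.
	h := r.posArgsHandler()

	var opts Options
	err := h.ParseArgs([]string{"--foo=bar"}, &opts)
	assert.NoError(t, err)
	assert.Equal(t, map[string]string{"foo": "bar"}, opts.Job.notebookParams)
}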
+ +func TestJobTaskPythonParamArgs(t *testing.T) { + a := jobTaskPythonParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.pythonParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.pythonParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.pythonParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} + +func TestJobTaskSparkSubmitParamArgs(t *testing.T) { + a := jobTaskSparkSubmitParamArgs{} + + t.Run("ParseArgs", func(t *testing.T) { + var opts Options + err := a.ParseArgs([]string{"foo", "bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.sparkSubmitParams, + ) + }) + + t.Run("ParseArgsAppend", func(t *testing.T) { + var opts Options + opts.Job.sparkSubmitParams = []string{"foo"} + err := a.ParseArgs([]string{"bar"}, &opts) + assert.NoError(t, err) + assert.Equal( + t, + []string{"foo", "bar"}, + opts.Job.sparkSubmitParams, + ) + }) + + t.Run("CompleteArgs", func(t *testing.T) { + completions, _ := a.CompleteArgs([]string{}, "") + assert.Empty(t, completions) + }) +} diff --git a/bundle/run/job_options.go b/bundle/run/job_options.go index 209591d76..c359e79eb 100644 --- a/bundle/run/job_options.go +++ b/bundle/run/job_options.go @@ -27,8 +27,11 @@ type JobOptions struct { jobParams map[string]string } -func (o *JobOptions) Define(fs *flag.FlagSet) { - // Define task parameters flags. +func (o *JobOptions) DefineJobOptions(fs *flag.FlagSet) { + fs.StringToStringVar(&o.jobParams, "params", nil, "comma separated k=v pairs for job parameters") +} + +func (o *JobOptions) DefineTaskOptions(fs *flag.FlagSet) { fs.StringSliceVar(&o.dbtCommands, "dbt-commands", nil, "A list of commands to execute for jobs with DBT tasks.") fs.StringSliceVar(&o.jarParams, "jar-params", nil, "A list of parameters for jobs with Spark JAR tasks.") fs.StringToStringVar(&o.notebookParams, "notebook-params", nil, "A map from keys to values for jobs with notebook tasks.") @@ -37,9 +40,6 @@ func (o *JobOptions) Define(fs *flag.FlagSet) { fs.StringSliceVar(&o.pythonParams, "python-params", nil, "A list of parameters for jobs with Python tasks.") fs.StringSliceVar(&o.sparkSubmitParams, "spark-submit-params", nil, "A list of parameters for jobs with Spark submit tasks.") fs.StringToStringVar(&o.sqlParams, "sql-params", nil, "A map from keys to values for jobs with SQL tasks.") - - // Define job parameters flag. 
- fs.StringToStringVar(&o.jobParams, "params", nil, "comma separated k=v pairs for job parameters") } func (o *JobOptions) hasTaskParametersConfigured() bool { diff --git a/bundle/run/job_options_test.go b/bundle/run/job_options_test.go index 822771d8e..08e18d95d 100644 --- a/bundle/run/job_options_test.go +++ b/bundle/run/job_options_test.go @@ -13,7 +13,8 @@ import ( func setupJobOptions(t *testing.T) (*flag.FlagSet, *JobOptions) { var fs flag.FlagSet var opts JobOptions - opts.Define(&fs) + opts.DefineJobOptions(&fs) + opts.DefineTaskOptions(&fs) return &fs, &opts } diff --git a/bundle/run/job_test.go b/bundle/run/job_test.go index e4cb4e7e8..be189306b 100644 --- a/bundle/run/job_test.go +++ b/bundle/run/job_test.go @@ -1,12 +1,16 @@ package run import ( + "context" "testing" + "time" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/experimental/mocks" "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) @@ -47,3 +51,78 @@ func TestConvertPythonParams(t *testing.T) { require.Contains(t, opts.Job.notebookParams, "__python_params") require.Equal(t, opts.Job.notebookParams["__python_params"], `["param1","param2","param3"]`) } + +func TestJobRunnerCancel(t *testing.T) { + job := &resources.Job{ + ID: "123", + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{ + {RunId: 1}, + {RunId: 2}, + }, nil) + + mockWait := &jobs.WaitGetRunJobTerminatedOrSkipped[struct{}]{ + Poll: func(time time.Duration, f func(j *jobs.Run)) (*jobs.Run, error) { + return nil, nil + }, + } + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 1, + }).Return(mockWait, nil) + jobApi.EXPECT().CancelRun(mock.Anything, jobs.CancelRun{ + RunId: 2, + }).Return(mockWait, nil) + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} + +func TestJobRunnerCancelWithNoActiveRuns(t *testing.T) { + job := &resources.Job{ + ID: "123", + } + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test_job": job, + }, + }, + }, + } + + runner := jobRunner{key: "test", bundle: b, job: job} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobApi := m.GetMockJobsAPI() + jobApi.EXPECT().ListRunsAll(mock.Anything, jobs.ListRunsRequest{ + ActiveOnly: true, + JobId: 123, + }).Return([]jobs.BaseRun{}, nil) + + jobApi.AssertNotCalled(t, "CancelRun") + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} diff --git a/bundle/run/options.go b/bundle/run/options.go index 3194fb328..4e50788a9 100644 --- a/bundle/run/options.go +++ b/bundle/run/options.go @@ -1,7 +1,8 @@ package run import ( - flag "github.com/spf13/pflag" + "github.com/databricks/cli/libs/cmdgroup" + "github.com/spf13/cobra" ) type Options struct { @@ -10,7 +11,20 @@ type Options struct { NoWait bool } -func (o *Options) Define(fs *flag.FlagSet) { - o.Job.Define(fs) - o.Pipeline.Define(fs) +func (o *Options) Define(cmd *cobra.Command) { + 
jobGroup := cmdgroup.NewFlagGroup("Job") + o.Job.DefineJobOptions(jobGroup.FlagSet()) + + jobTaskGroup := cmdgroup.NewFlagGroup("Job Task") + jobTaskGroup.SetDescription(`Note: please prefer use of job-level parameters (--param) over task-level parameters. + For more information, see https://docs.databricks.com/en/workflows/jobs/create-run-jobs.html#pass-parameters-to-a-databricks-job-task`) + o.Job.DefineTaskOptions(jobTaskGroup.FlagSet()) + + pipelineGroup := cmdgroup.NewFlagGroup("Pipeline") + o.Pipeline.Define(pipelineGroup.FlagSet()) + + wrappedCmd := cmdgroup.NewCommandWithGroupFlag(cmd) + wrappedCmd.AddFlagGroup(jobGroup) + wrappedCmd.AddFlagGroup(jobTaskGroup) + wrappedCmd.AddFlagGroup(pipelineGroup) } diff --git a/bundle/run/pipeline.go b/bundle/run/pipeline.go index 342a771b1..4e29b9f3f 100644 --- a/bundle/run/pipeline.go +++ b/bundle/run/pipeline.go @@ -12,6 +12,7 @@ import ( "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/spf13/cobra" ) func filterEventsByUpdateId(events []pipelines.PipelineEvent, updateId string) []pipelines.PipelineEvent { @@ -166,3 +167,30 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp time.Sleep(time.Second) } } + +func (r *pipelineRunner) Cancel(ctx context.Context) error { + w := r.bundle.WorkspaceClient() + wait, err := w.Pipelines.Stop(ctx, pipelines.StopRequest{ + PipelineId: r.pipeline.ID, + }) + + if err != nil { + return err + } + + // Waits for the Idle state of the pipeline + _, err = wait.GetWithTimeout(jobRunTimeout) + return err +} + +func (r *pipelineRunner) ParseArgs(args []string, opts *Options) error { + if len(args) == 0 { + return nil + } + + return fmt.Errorf("received %d unexpected positional arguments", len(args)) +} + +func (r *pipelineRunner) CompleteArgs(args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + return nil, cobra.ShellCompDirectiveNoFileComp +} diff --git a/bundle/run/pipeline_options.go b/bundle/run/pipeline_options.go index 4917f9db3..6c8c1e8c7 100644 --- a/bundle/run/pipeline_options.go +++ b/bundle/run/pipeline_options.go @@ -22,6 +22,9 @@ type PipelineOptions struct { // List of tables to reset and recompute. FullRefresh []string + + // Perform an update to validate graph correctness. + ValidateOnly bool } func (o *PipelineOptions) Define(fs *flag.FlagSet) { @@ -29,6 +32,7 @@ func (o *PipelineOptions) Define(fs *flag.FlagSet) { fs.StringSliceVar(&o.Refresh, "refresh", nil, "List of tables to update.") fs.BoolVar(&o.FullRefreshAll, "full-refresh-all", false, "Perform a full graph reset and recompute.") fs.StringSliceVar(&o.FullRefresh, "full-refresh", nil, "List of tables to reset and recompute.") + fs.BoolVar(&o.ValidateOnly, "validate-only", false, "Perform an update to validate graph correctness.") } // Validate returns if the combination of options is valid. 
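[Editorial aside, not part of the patch] The next hunk folds --validate-only into the set of mutually exclusive pipeline run flags checked by Validate. The snippet below strings the Define and Validate behavior together around the new flag; it is an illustrative, self-contained sketch using the same pflag calls as above (the refresh-all usage string is assumed), not code from the patch.

package main

import (
	"fmt"
	"strings"

	flag "github.com/spf13/pflag"
)

func main() {
	var (
		refreshAll     bool
		refresh        []string
		fullRefreshAll bool
		fullRefresh    []string
		validateOnly   bool
	)

	fs := flag.NewFlagSet("pipeline", flag.ContinueOnError)
	fs.BoolVar(&refreshAll, "refresh-all", false, "Perform a full graph update.")
	fs.StringSliceVar(&refresh, "refresh", nil, "List of tables to update.")
	fs.BoolVar(&fullRefreshAll, "full-refresh-all", false, "Perform a full graph reset and recompute.")
	fs.StringSliceVar(&fullRefresh, "full-refresh", nil, "List of tables to reset and recompute.")
	fs.BoolVar(&validateOnly, "validate-only", false, "Perform an update to validate graph correctness.")

	if err := fs.Parse([]string{"--validate-only"}); err != nil {
		fmt.Println("parse error:", err)
		return
	}

	// Mirror of the mutual-exclusivity rule extended in the next hunk.
	var set []string
	if refreshAll {
		set = append(set, "--refresh-all")
	}
	if len(refresh) > 0 {
		set = append(set, "--refresh")
	}
	if fullRefreshAll {
		set = append(set, "--full-refresh-all")
	}
	if len(fullRefresh) > 0 {
		set = append(set, "--full-refresh")
	}
	if validateOnly {
		set = append(set, "--validate-only")
	}
	if len(set) > 1 {
		fmt.Printf("error: pipeline run arguments are mutually exclusive (got %s)\n", strings.Join(set, ", "))
		return
	}
	fmt.Println("validate-only:", validateOnly) // prints: validate-only: true
}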
@@ -46,6 +50,9 @@ func (o *PipelineOptions) Validate(pipeline *resources.Pipeline) error { if len(o.FullRefresh) > 0 { set = append(set, "--full-refresh") } + if o.ValidateOnly { + set = append(set, "--validate-only") + } if len(set) > 1 { return fmt.Errorf("pipeline run arguments are mutually exclusive (got %s)", strings.Join(set, ", ")) } @@ -63,6 +70,7 @@ func (o *PipelineOptions) toPayload(pipeline *resources.Pipeline, pipelineID str RefreshSelection: o.Refresh, FullRefresh: o.FullRefreshAll, FullRefreshSelection: o.FullRefresh, + ValidateOnly: o.ValidateOnly, } return payload, nil } diff --git a/bundle/run/pipeline_options_test.go b/bundle/run/pipeline_options_test.go index 3048a4d8c..b42de8c07 100644 --- a/bundle/run/pipeline_options_test.go +++ b/bundle/run/pipeline_options_test.go @@ -43,12 +43,20 @@ func TestPipelineOptionsFullRefresh(t *testing.T) { assert.Equal(t, []string{"arg1", "arg2", "arg3"}, opts.FullRefresh) } +func TestPipelineOptionsValidateOnly(t *testing.T) { + fs, opts := setupPipelineOptions(t) + err := fs.Parse([]string{`--validate-only`}) + require.NoError(t, err) + assert.True(t, opts.ValidateOnly) +} + func TestPipelineOptionsValidateSuccessWithSingleOption(t *testing.T) { args := []string{ `--refresh-all`, `--refresh=arg1,arg2,arg3`, `--full-refresh-all`, `--full-refresh=arg1,arg2,arg3`, + `--validate-only`, } for _, arg := range args { fs, opts := setupPipelineOptions(t) @@ -65,6 +73,7 @@ func TestPipelineOptionsValidateFailureWithMultipleOptions(t *testing.T) { `--refresh=arg1,arg2,arg3`, `--full-refresh-all`, `--full-refresh=arg1,arg2,arg3`, + `--validate-only`, } for i := range args { for j := range args { diff --git a/bundle/run/pipeline_test.go b/bundle/run/pipeline_test.go new file mode 100644 index 000000000..29b57ffdb --- /dev/null +++ b/bundle/run/pipeline_test.go @@ -0,0 +1,49 @@ +package run + +import ( + "context" + "testing" + "time" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/pipelines" + "github.com/stretchr/testify/require" +) + +func TestPipelineRunnerCancel(t *testing.T) { + pipeline := &resources.Pipeline{ + ID: "123", + } + + b := &bundle.Bundle{ + Config: config.Root{ + Resources: config.Resources{ + Pipelines: map[string]*resources.Pipeline{ + "test_pipeline": pipeline, + }, + }, + }, + } + + runner := pipelineRunner{key: "test", bundle: b, pipeline: pipeline} + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + mockWait := &pipelines.WaitGetPipelineIdle[struct{}]{ + Poll: func(time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) { + return nil, nil + }, + } + + pipelineApi := m.GetMockPipelinesAPI() + pipelineApi.EXPECT().Stop(context.Background(), pipelines.StopRequest{ + PipelineId: "123", + }).Return(mockWait, nil) + + err := runner.Cancel(context.Background()) + require.NoError(t, err) +} diff --git a/bundle/run/runner.go b/bundle/run/runner.go index 7d3c2c297..0f202ce7d 100644 --- a/bundle/run/runner.go +++ b/bundle/run/runner.go @@ -26,6 +26,12 @@ type Runner interface { // Run the underlying worklow. Run(ctx context.Context, opts *Options) (output.RunOutput, error) + + // Cancel the underlying workflow. + Cancel(ctx context.Context) error + + // Runners support parsing and completion of additional positional arguments. 
+ argsHandler } // Find locates a runner matching the specified argument. diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go index fe63e4328..5b960ea55 100644 --- a/bundle/schema/docs.go +++ b/bundle/schema/docs.go @@ -70,7 +70,7 @@ func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) { } openapiReader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } // Generate descriptions for the "resources" field diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json index 8d16970c5..380be0545 100644 --- a/bundle/schema/docs/bundle_descriptions.json +++ b/bundle/schema/docs/bundle_descriptions.json @@ -1,14 +1,17 @@ { - "description": "Root of the bundle config", + "description": "", "properties": { "artifacts": { - "description": "A description of all code artifacts in this bundle.", + "description": "", "additionalproperties": { "description": "", "properties": { "build": { "description": "" }, + "executable": { + "description": "" + }, "files": { "description": "", "items": { @@ -30,11 +33,33 @@ } }, "bundle": { - "description": "The details for this bundle.", + "description": "", "properties": { "compute_id": { "description": "" }, + "databricks_cli_version": { + "description": "" + }, + "deployment": { + "description": "", + "properties": { + "fail_on_active_runs": { + "description": "" + }, + "lock": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "force": { + "description": "" + } + } + } + } + }, "git": { "description": "", "properties": { @@ -47,13 +72,24 @@ } }, "name": { - "description": "The name of the bundle." + "description": "" } } }, "experimental": { "description": "", "properties": { + "pydabs": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "venv_path": { + "description": "" + } + } + }, "python_wheel_wrapper": { "description": "" }, @@ -62,11 +98,14 @@ "additionalproperties": { "description": "" } + }, + "use_legacy_run_as": { + "description": "" } } }, "include": { - "description": "A list of glob patterns of files to load and merge into the this configuration. Defaults to no files being included.", + "description": "", "items": { "description": "" } @@ -159,25 +198,6 @@ "additionalproperties": { "description": "", "properties": { - "compute": { - "description": "A list of compute requirements that can be referenced by tasks of this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." - }, - "spec": { - "description": "", - "properties": { - "kind": { - "description": "The kind of compute described by this compute specification." - } - } - } - } - } - }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. 
Only one of `schedule` and `continuous` can be used.", "properties": { @@ -190,7 +210,7 @@ "description": "Deployment information for jobs managed by external sources.", "properties": { "kind": { - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle." }, "metadata_file_path": { "description": "Path of the file that contains deployment metadata." @@ -201,7 +221,7 @@ "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." }, "edit_mode": { - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", @@ -227,6 +247,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -235,6 +261,31 @@ } } }, + "environments": { + "description": "A list of task execution environment specifications that can be referenced by tasks of this job.", + "items": { + "description": "", + "properties": { + "environment_key": { + "description": "The key of an environment. It has to be unique within a job." + }, + "spec": { + "description": "", + "properties": { + "client": { + "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version." + }, + "dependencies": { + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", + "items": { + "description": "" + } + } + } + } + } + } + }, "format": { "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. 
When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." }, @@ -268,7 +319,7 @@ "description": "The source of the job specification in the remote repository when the job is source controlled.", "properties": { "dirty_state": { - "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced." }, "import_from_git_branch": { "description": "Name of the branch which the job is imported from." @@ -311,10 +362,10 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -340,13 +391,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. 
For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -432,9 +483,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -489,6 +537,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -497,6 +551,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -513,6 +575,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. 
e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -619,7 +689,7 @@ } }, "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." }, "name": { "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." @@ -692,13 +762,13 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Indicate whether this schedule is paused or not." }, "quartz_cron_expression": { - "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." }, "timezone_id": { - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required." 
} } }, @@ -713,9 +783,6 @@ "items": { "description": "", "properties": { - "compute_key": { - "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." - }, "condition_task": { "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", "properties": { @@ -723,7 +790,7 @@ "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." }, "op": { - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison." }, "right": { "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." @@ -746,18 +813,21 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used." }, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, + "source": { + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider." + }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." 
} } }, "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", "items": { "description": "", "properties": { @@ -773,9 +843,15 @@ "description": { "description": "An optional description for this task." }, + "disable_auto_optimization": { + "description": "An option to disable auto optimization in serverless" + }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, "on_duration_warning_threshold_exceeded": { "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", "items": { @@ -794,6 +870,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -802,8 +884,14 @@ } } }, + "environment_key": { + "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute." + }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. 
We suggest running jobs and tasks on new clusters for\ngreater reliability" + }, + "for_each_task": { + "description": "" }, "health": { "description": "", @@ -831,7 +919,7 @@ "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." }, "libraries": { - "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", "items": { "description": "", "properties": { @@ -847,10 +935,10 @@ } }, "egg": { - "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -880,8 +968,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." 
} } } @@ -893,10 +984,10 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -922,13 +1013,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -1014,9 +1105,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -1071,6 +1159,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. 
Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -1079,6 +1173,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -1095,6 +1197,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -1201,16 +1311,19 @@ "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", "additionalproperties": { "description": "" } }, "notebook_path": { - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -1232,7 +1345,7 @@ "description": "If pipeline_task, indicates that this task must execute a Pipeline.", "properties": { "full_refresh": { - "description": "If true, a full refresh will be triggered on the delta live table." + "description": "If true, triggers a full refresh on the delta live table." }, "pipeline_id": { "description": "The full name of the pipeline task to execute." @@ -1263,14 +1376,26 @@ } }, "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out." + "description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout." 
}, "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n" + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed" }, "run_job_task": { "description": "If run_job_task, indicates that this task must execute another job.", "properties": { + "dbt_commands": { + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", + "items": { + "description": "" + } + }, + "jar_params": { + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "items": { + "description": "" + } + }, "job_id": { "description": "ID of the job to trigger." }, @@ -1279,6 +1404,44 @@ "additionalproperties": { "description": "" } + }, + "notebook_params": { + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "additionalproperties": { + "description": "" + } + }, + "pipeline_params": { + "description": "", + "properties": { + "full_refresh": { + "description": "If true, triggers a full refresh on the delta live table." + } + } + }, + "python_named_params": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "python_params": { + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. 
If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "spark_submit_params": { + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "sql_params": { + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } } } }, @@ -1286,13 +1449,13 @@ "description": "If spark_jar_task, indicates that this task must run a JAR.", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create." }, "main_class_name": { "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." 
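As a quick illustration of the run_job_task override parameters documented above, a minimal sketch that triggers another job with notebook parameters might look like the following; the job ID is a placeholder, the name/age pair mirrors the example quoted in the descriptions, and the other override groups (`jar_params`, `python_params`, `sql_params`, and so on) follow the same shape.

```json
{
  "run_job_task": {
    "job_id": 123456,
    "notebook_params": { "name": "john doe", "age": "35" }
  }
}
```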
}, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1303,7 +1466,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1312,15 +1475,15 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. 
Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -1384,10 +1547,13 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { - "description": "Relative path of the SQL file in the remote Git repository." + "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." + }, + "source": { + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider." } } }, @@ -1417,7 +1583,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -1452,6 +1618,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. 
If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1472,24 +1649,75 @@ "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." }, "trigger": { - "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "file_arrival": { "description": "File arrival trigger settings.", "properties": { "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds" }, "url": { "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds." } } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." + }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." + }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, + "table": { + "description": "Old table trigger settings name. Deprecated in favor of `table_update`.", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. 
The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } + }, + "table_update": { + "description": "", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } } } }, @@ -1529,6 +1757,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -1557,21 +1796,21 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. 
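Pulling together the trigger settings described above, a minimal sketch of a `trigger` block using the newer `table_update` form could look like the following; the table name is a placeholder and the 60-second values simply reuse the documented minimums.

```json
{
  "trigger": {
    "pause_status": "UNPAUSED",
    "table_update": {
      "table_names": ["main.analytics.orders"],
      "min_time_between_triggers_seconds": 60,
      "wait_after_last_change_seconds": 60
    }
  }
}
```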
NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, "served_entities": { - "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.", "items": { "description": "", "properties": { @@ -1590,99 +1829,103 @@ "external_model": { "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", "properties": { - "config": { - "description": "The config for the external model, which must match the provider.", + "ai21labs_config": { + "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", "properties": { - "ai21labs_config": { - "description": "AI21Labs Config", - "properties": { - "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." - } - } + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "amazon_bedrock_config": { + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." }, - "anthropic_config": { - "description": "Anthropic Config", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config", - "properties": { - "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." - }, - "aws_region": { - "description": "The AWS region to use. Bedrock has to be enabled there." - }, - "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." - }, - "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." - } - } + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, - "cohere_config": { - "description": "Cohere Config", - "properties": { - "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." - } - } + "bedrock_provider": { + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. 
Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "cohere_config": { + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" }, - "databricks_model_serving_config": { - "description": "Databricks Model Serving Config", - "properties": { - "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" - }, - "databricks_workspace_url": { - "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" - } - } - }, - "openai_config": { - "description": "OpenAI Config", - "properties": { - "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" - }, - "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." - }, - "openai_api_type": { - "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" - }, - "openai_api_version": { - "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" - }, - "openai_deployment_name": { - "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" - }, - "openai_organization": { - "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" - } - } - }, - "palm_config": { - "description": "PaLM Config", - "properties": { - "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." - } - } + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, "name": { "description": "The name of the external model." }, + "openai_config": { + "description": "OpenAI Config. 
Only required if the provider is 'openai'.", + "properties": { + "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" + }, + "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + }, + "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" + }, + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config. Only required if the provider is 'palm'.", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." @@ -1692,6 +1935,12 @@ "instance_profile_arn": { "description": "ARN of the instance profile that the served entity uses to access AWS resources." }, + "max_provisioned_throughput": { + "description": "The maximum tokens per second that the endpoint can scale up to." + }, + "min_provisioned_throughput": { + "description": "The minimum tokens per second that the endpoint can scale down to." + }, "name": { "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" }, @@ -1708,7 +1957,7 @@ } }, "served_models": { - "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. 
A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.", "items": { "description": "", "properties": { @@ -1722,7 +1971,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." @@ -1803,6 +2052,9 @@ } } }, + "route_optimized": { + "description": "Enable route optimization for the serving endpoint." + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { @@ -1958,10 +2210,13 @@ "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`." }, "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + }, + "mode": { + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n" } } }, @@ -1975,13 +2230,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." 
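Tying together the restructured external-model provider blocks from the serving-endpoint hunks above, a minimal sketch of a served entity backed by an OpenAI external model could look like the following; the entity name, model name, task string, and secret scope/key are illustrative assumptions rather than values defined in this change.

```json
{
  "served_entities": [
    {
      "name": "my-external-chat-model",
      "external_model": {
        "name": "gpt-4o",
        "provider": "openai",
        "task": "llm/v1/chat",
        "openai_config": {
          "openai_api_key": "{{secrets/my_scope/openai_api_key}}"
        }
      }
    }
  ]
}
```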
}, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -2090,6 +2345,94 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." + } + } + }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. 
`gs://my-bucket/some-prefix`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. `/Users/user1@databricks.com/my-init.sh`" + } + } + } } } }, @@ -2138,6 +2481,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -2161,9 +2515,136 @@ } } }, + "gateway_definition": { + "description": "The definition of a gateway pipeline to support CDC.", + "properties": { + "connection_id": { + "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source." 
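The init-script destinations documented above all share the same single-key shape; a minimal sketch combining the Volumes and S3 variants, reusing the example paths quoted in the descriptions, is shown below.

```json
{
  "init_scripts": [
    { "volumes": { "destination": "/Volumes/my-init.sh" } },
    {
      "s3": {
        "destination": "s3://cluster_log_bucket/prefix",
        "region": "us-west-2"
      }
    }
  ]
}
```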
+ }, + "gateway_storage_catalog": { + "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." + }, + "gateway_storage_name": { + "description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + }, + "gateway_storage_schema": { + "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." + } + } + }, "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. 
Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + } + } + } + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { @@ -2198,7 +2679,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." 
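As an illustration of the managed ingestion settings above, a minimal sketch that replicates a single table through a Unity Catalog connection could look like the following; the connection, catalog, schema, and table names are placeholders, and `SCD_TYPE_2` is an assumed `scd_type` value rather than one stated in this change.

```json
{
  "ingestion_definition": {
    "connection_name": "my_salesforce_connection",
    "objects": [
      {
        "table": {
          "source_schema": "sales",
          "source_table": "Account",
          "destination_catalog": "main",
          "destination_schema": "ingested",
          "table_configuration": {
            "scd_type": "SCD_TYPE_2",
            "salesforce_include_formula_fields": true
          }
        }
      }
    ]
  }
}
```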
@@ -2285,6 +2766,156 @@ } } }, + "quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { @@ -2361,13 +2992,16 @@ "description": "", "properties": { "artifacts": { - "description": "A description of all code artifacts in this bundle.", + "description": "", "additionalproperties": { "description": "", "properties": { "build": { "description": "" }, + "executable": { + "description": "" + }, "files": { "description": "", "items": { @@ -2389,11 +3023,33 @@ } }, "bundle": { - "description": "The details for this bundle.", + "description": "", "properties": { "compute_id": { "description": "" }, + "databricks_cli_version": { + "description": "" + }, + "deployment": { + "description": "", + "properties": { + "fail_on_active_runs": { + "description": "" + }, + "lock": { + "description": "", + "properties": { + "enabled": { + "description": "" + }, + "force": { + "description": "" + } + } + } + } + }, "git": { "description": "", "properties": { @@ -2406,7 +3062,7 @@ } }, "name": { - "description": "The name of the bundle." 
+ "description": "" } } }, @@ -2518,25 +3174,6 @@ "additionalproperties": { "description": "", "properties": { - "compute": { - "description": "A list of compute requirements that can be referenced by tasks of this job.", - "items": { - "description": "", - "properties": { - "compute_key": { - "description": "A unique name for the compute requirement. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine the compute requirements for the task execution." - }, - "spec": { - "description": "", - "properties": { - "kind": { - "description": "The kind of compute described by this compute specification." - } - } - } - } - } - }, "continuous": { "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.", "properties": { @@ -2549,7 +3186,7 @@ "description": "Deployment information for jobs managed by external sources.", "properties": { "kind": { - "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle.\n" + "description": "The kind of deployment that manages the job.\n\n* `BUNDLE`: The job is managed by Databricks Asset Bundle." }, "metadata_file_path": { "description": "Path of the file that contains deployment metadata." @@ -2560,7 +3197,7 @@ "description": "An optional description for the job. The maximum length is 1024 characters in UTF-8 encoding." }, "edit_mode": { - "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified.\n" + "description": "Edit mode of the job.\n\n* `UI_LOCKED`: The job is in a locked UI state and cannot be modified.\n* `EDITABLE`: The job is in an editable state and can be modified." }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted.", @@ -2586,6 +3223,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -2594,6 +3237,31 @@ } } }, + "environments": { + "description": "A list of task execution environment specifications that can be referenced by tasks of this job.", + "items": { + "description": "", + "properties": { + "environment_key": { + "description": "The key of an environment. It has to be unique within a job." 
+ }, + "spec": { + "description": "", + "properties": { + "client": { + "description": "Client version used by the environment\nThe client is the user-facing environment of the runtime.\nEach client comes with a specific set of pre-installed libraries.\nThe version is a string, consisting of the major client version." + }, + "dependencies": { + "description": "List of pip dependencies, as supported by the version of pip in this environment.\nEach dependency is a pip requirement file line https://pip.pypa.io/en/stable/reference/requirements-file-format/\nAllowed dependency could be \u003crequirement specifier\u003e, \u003carchive url/path\u003e, \u003clocal project path\u003e(WSFS or Volumes in Databricks), \u003cvcs project url\u003e\nE.g. dependencies: [\"foo==0.0.1\", \"-r /Workspace/test/requirements.txt\"]", + "items": { + "description": "" + } + } + } + } + } + } + }, "format": { "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." }, @@ -2627,7 +3295,7 @@ "description": "The source of the job specification in the remote repository when the job is source controlled.", "properties": { "dirty_state": { - "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced.\n" + "description": "Dirty state indicates the job is not fully synced with the job specification in the remote repository.\n\nPossible values are:\n* `NOT_SYNCED`: The job is not yet synced with the remote job specification. Import the remote job specification from UI to make the job fully synced.\n* `DISCONNECTED`: The job is temporary disconnected from the remote job specification and is allowed for live edit. Import the remote job specification again from UI to make the job fully synced." }, "import_from_git_branch": { "description": "Name of the branch which the job is imported from." @@ -2670,10 +3338,10 @@ "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a cluster that is created for each task.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -2699,13 +3367,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -2791,9 +3459,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -2848,6 +3513,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -2856,6 +3527,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. 
e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -2872,6 +3551,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -2978,7 +3665,7 @@ } }, "max_concurrent_runs": { - "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." + "description": "An optional maximum allowed number of concurrent runs of the job.\nSet this value if you want to be able to execute multiple runs of the same job concurrently.\nThis is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs.\nHowever, from then on, new runs are skipped unless there are fewer than 3 active runs.\nThis value cannot exceed 1000. Setting this value to `0` causes all new runs to be skipped." }, "name": { "description": "An optional name for the job. The maximum length is 4096 bytes in UTF-8 encoding." @@ -3051,13 +3738,13 @@ "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Indicate whether this schedule is paused or not." 
}, "quartz_cron_expression": { - "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + "description": "A Cron expression using Quartz syntax that describes the schedule for a job. See [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html) for details. This field is required." }, "timezone_id": { - "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone. See [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details. This field is required." } } }, @@ -3072,9 +3759,6 @@ "items": { "description": "", "properties": { - "compute_key": { - "description": "The key of the compute requirement, specified in `job.settings.compute`, to use for execution of this task." - }, "condition_task": { "description": "If condition_task, specifies a condition with an outcome that can be used to control the execution of other tasks. Does not require a cluster to execute and does not support retries or notifications.", "properties": { @@ -3082,7 +3766,7 @@ "description": "The left operand of the condition task. Can be either a string value or a job state or parameter reference." }, "op": { - "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison.\n" + "description": "* `EQUAL_TO`, `NOT_EQUAL` operators perform string comparison of their operands. This means that `“12.0” == “12”` will evaluate to `false`.\n* `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL` operators perform numeric comparison of their operands. `“12.0” \u003e= “12”` will evaluate to `true`, `“10.0” \u003e= “12”` will evaluate to `false`.\n\nThe boolean comparison to task values can be implemented with operators `EQUAL_TO`, `NOT_EQUAL`. If a task value was set to a boolean value, it will be serialized to `“true”` or `“false”` for the comparison." }, "right": { "description": "The right operand of the condition task. Can be either a string value or a job state or parameter reference." @@ -3105,18 +3789,21 @@ "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." }, "project_directory": { - "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + "description": "Path to the project directory. Optional for Git sourced tasks, in which\ncase if no value is provided, the root of the Git repository is used." 
}, "schema": { "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." }, + "source": { + "description": "Optional location type of the project directory. When set to `WORKSPACE`, the project will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the project will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Project is located in Databricks workspace.\n* `GIT`: Project is located in cloud Git provider." + }, "warehouse_id": { "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." } } }, "depends_on": { - "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.\n", + "description": "An optional array of objects specifying the dependency graph of the task. All tasks specified in this field must complete before executing this task. The task will run only if the `run_if` condition is true.\nThe key is `task_key`, and the value is the name assigned to the dependent task.", "items": { "description": "", "properties": { @@ -3132,9 +3819,15 @@ "description": { "description": "An optional description for this task." }, + "disable_auto_optimization": { + "description": "An option to disable auto optimization in serverless" + }, "email_notifications": { "description": "An optional set of email addresses that is notified when runs of this task begin or complete as well as when this task is deleted. The default behavior is to not send any emails.", "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, "on_duration_warning_threshold_exceeded": { "description": "A list of email addresses to be notified when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. If no rule for the `RUN_DURATION_SECONDS` metric is specified in the `health` field for the job, notifications are not sent.", "items": { @@ -3153,6 +3846,12 @@ "description": "" } }, + "on_streaming_backlog_exceeded": { + "description": "A list of email addresses to notify when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.", + "items": { + "description": "" + } + }, "on_success": { "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESS` result_state. 
If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", "items": { @@ -3161,8 +3860,14 @@ } } }, + "environment_key": { + "description": "The key that references an environment spec in a job. This field is required for Python script, Python wheel and dbt tasks when using serverless compute." + }, "existing_cluster_id": { - "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs.\nWhen running jobs or tasks on an existing cluster, you may need to manually restart\nthe cluster if it stops responding. We suggest running jobs and tasks on new clusters for\ngreater reliability" + }, + "for_each_task": { + "description": "" }, "health": { "description": "", @@ -3190,7 +3895,7 @@ "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." }, "libraries": { - "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "description": "An optional list of libraries to be installed on the cluster.\nThe default value is an empty list.", "items": { "description": "", "properties": { @@ -3206,10 +3911,10 @@ } }, "egg": { - "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the egg library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"egg\": \"/Workspace/path/to/library.egg\" }`, `{ \"egg\" : \"/Volumes/path/to/library.egg\" }` or\n`{ \"egg\": \"s3://my-bucket/library.egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "jar": { - "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the JAR library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"jar\": \"/Workspace/path/to/library.jar\" }`, `{ \"jar\" : \"/Volumes/path/to/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." }, "maven": { "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", @@ -3239,8 +3944,11 @@ } } }, + "requirements": { + "description": "URI of the requirements.txt file to install. 
Only Workspace paths and Unity Catalog Volumes paths are supported.\nFor example: `{ \"requirements\": \"/Workspace/path/to/requirements.txt\" }` or `{ \"requirements\" : \"/Volumes/path/to/requirements.txt\" }`" + }, "whl": { - "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + "description": "URI of the wheel library to install. Supported URIs include Workspace paths, Unity Catalog Volumes paths, and S3 URIs.\nFor example: `{ \"whl\": \"/Workspace/path/to/library.whl\" }`, `{ \"whl\" : \"/Volumes/path/to/library.whl\" }` or\n`{ \"whl\": \"s3://my-bucket/library.whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." } } } @@ -3252,10 +3960,10 @@ "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." }, "new_cluster": { - "description": "If new_cluster, a description of a cluster that is created for only for this task.", + "description": "If new_cluster, a description of a new cluster that is created for each run.", "properties": { "apply_policy_default_values": { - "description": "" + "description": "When set to true, fixed and default values from the policy will be used for fields that are omitted. When set to false, only fixed values from the policy will be applied." }, "autoscale": { "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", @@ -3281,13 +3989,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. 
If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -3373,9 +4081,6 @@ "cluster_name": { "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" }, - "cluster_source": { - "description": "" - }, "custom_tags": { "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", "additionalproperties": { @@ -3430,6 +4135,12 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." } } }, @@ -3438,6 +4149,14 @@ "items": { "description": "", "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." + } + } + }, "dbfs": { "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", "properties": { @@ -3454,6 +4173,14 @@ } } }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, "s3": { "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", "properties": { @@ -3560,16 +4287,19 @@ "description": "If notebook_task, indicates that this task must run a notebook. 
This field may not be specified in conjunction with spark_jar_task.", "properties": { "base_parameters": { - "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.\n", + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to :method:jobs/run\nNow with parameters specified, the two parameters maps are merged. If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n\nThe JSON representation of this field cannot exceed 1MB.", "additionalproperties": { "description": "" } }, "notebook_path": { - "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved from the local Databricks workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n* `WORKSPACE`: Notebook is located in Databricks workspace.\n* `GIT`: Notebook is located in cloud Git provider." + }, + "warehouse_id": { + "description": "Optional `warehouse_id` to run the notebook on a SQL warehouse. 
Classic SQL warehouses are NOT supported, please use serverless or pro SQL warehouses.\n\nNote that SQL warehouses only support SQL cells; if the notebook contains non-SQL cells, the run will fail." } } }, @@ -3591,7 +4321,7 @@ "description": "If pipeline_task, indicates that this task must execute a Pipeline.", "properties": { "full_refresh": { - "description": "If true, a full refresh will be triggered on the delta live table." + "description": "If true, triggers a full refresh on the delta live table." }, "pipeline_id": { "description": "The full name of the pipeline task to execute." @@ -3622,14 +4352,26 @@ } }, "retry_on_timeout": { - "description": "An optional policy to specify whether to retry a task when it times out." + "description": "An optional policy to specify whether to retry a job when it times out. The default behavior\nis to not retry on timeout." }, "run_if": { - "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed\n" + "description": "An optional value specifying the condition determining whether the task is run once its dependencies have been completed.\n\n* `ALL_SUCCESS`: All dependencies have executed and succeeded\n* `AT_LEAST_ONE_SUCCESS`: At least one dependency has succeeded\n* `NONE_FAILED`: None of the dependencies have failed and at least one was executed\n* `ALL_DONE`: All dependencies have been completed\n* `AT_LEAST_ONE_FAILED`: At least one dependency failed\n* `ALL_FAILED`: ALl dependencies have failed" }, "run_job_task": { "description": "If run_job_task, indicates that this task must execute another job.", "properties": { + "dbt_commands": { + "description": "An array of commands to execute for jobs with the dbt task, for example `\"dbt_commands\": [\"dbt deps\", \"dbt seed\", \"dbt deps\", \"dbt seed\", \"dbt run\"]`", + "items": { + "description": "" + } + }, + "jar_params": { + "description": "A list of parameters for jobs with Spark JAR tasks, for example `\"jar_params\": [\"john doe\", \"35\"]`.\nThe parameters are used to invoke the main function of the main class specified in the Spark JAR task.\nIf not specified upon `run-now`, it defaults to an empty list.\njar_params cannot be specified in conjunction with notebook_params.\nThe JSON representation of this field (for example `{\"jar_params\":[\"john doe\",\"35\"]}`) cannot exceed 10,000 bytes.\n\nUse [Task parameter variables](/jobs.html\\\"#parameter-variables\\\") to set parameters containing information about job runs.", + "items": { + "description": "" + } + }, "job_id": { "description": "ID of the job to trigger." 
}, @@ -3638,6 +4380,44 @@ "additionalproperties": { "description": "" } + }, + "notebook_params": { + "description": "A map from keys to values for jobs with notebook task, for example `\"notebook_params\": {\"name\": \"john doe\", \"age\": \"35\"}`.\nThe map is passed to the notebook and is accessible through the [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html) function.\n\nIf not specified upon `run-now`, the triggered run uses the job’s base parameters.\n\nnotebook_params cannot be specified in conjunction with jar_params.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nThe JSON representation of this field (for example `{\"notebook_params\":{\"name\":\"john doe\",\"age\":\"35\"}}`) cannot exceed 10,000 bytes.", + "additionalproperties": { + "description": "" + } + }, + "pipeline_params": { + "description": "", + "properties": { + "full_refresh": { + "description": "If true, triggers a full refresh on the delta live table." + } + } + }, + "python_named_params": { + "description": "", + "additionalproperties": { + "description": "" + } + }, + "python_params": { + "description": "A list of parameters for jobs with Python tasks, for example `\"python_params\": [\"john doe\", \"35\"]`.\nThe parameters are passed to Python file as command-line parameters. If specified upon `run-now`, it would overwrite\nthe parameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "spark_submit_params": { + "description": "A list of parameters for jobs with spark submit task, for example `\"spark_submit_params\": [\"--class\", \"org.apache.spark.examples.SparkPi\"]`.\nThe parameters are passed to spark-submit script as command-line parameters. If specified upon `run-now`, it would overwrite the\nparameters specified in job setting. The JSON representation of this field (for example `{\"python_params\":[\"john doe\",\"35\"]}`)\ncannot exceed 10,000 bytes.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs\n\nImportant\n\nThese parameters accept only Latin characters (ASCII character set). Using non-ASCII characters returns an error.\nExamples of invalid, non-ASCII characters are Chinese, Japanese kanjis, and emojis.", + "items": { + "description": "" + } + }, + "sql_params": { + "description": "A map from keys to values for jobs with SQL task, for example `\"sql_params\": {\"name\": \"john doe\", \"age\": \"35\"}`. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } } } }, @@ -3645,13 +4425,13 @@ "description": "If spark_jar_task, indicates that this task must run a JAR.", "properties": { "jar_uri": { - "description": "Deprecated since 04/2016. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + "description": "Deprecated since 04/2016. 
Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create." }, "main_class_name": { "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." }, "parameters": { - "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3662,7 +4442,7 @@ "description": "If spark_python_task, indicates that this task must run a Python file.", "properties": { "parameters": { - "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3671,15 +4451,15 @@ "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required." }, "source": { - "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n" + "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved from the local\nDatabricks workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a Databricks workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository." } } }, "spark_submit_task": { - "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations. \n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). 
You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.\n", + "description": "If `spark_submit_task`, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.\n\nIn the `new_cluster` specification, `libraries` and `spark_conf` are not supported. Instead, use `--jars` and `--py-files` to add Java and Python libraries and `--conf` to set the Spark configurations.\n\n`master`, `deploy-mode`, and `executor-cores` are automatically configured by Databricks; you _cannot_ specify them in parameters.\n\nBy default, the Spark submit job uses all available memory (excluding reserved memory for Databricks services). You can set `--driver-memory`, and `--executor-memory` to a smaller value to leave some room for off-heap usage.\n\nThe `--jars`, `--py-files`, `--files` arguments support DBFS and S3 paths.", "properties": { "parameters": { - "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.", "items": { "description": "" } @@ -3743,10 +4523,13 @@ } }, "file": { - "description": "If file, indicates that this job runs a SQL file in a remote Git repository. Only one SQL statement is supported in a file. Multiple SQL statements separated by semicolons (;) are not permitted.", + "description": "If file, indicates that this job runs a SQL file in a remote Git repository.", "properties": { "path": { - "description": "Relative path of the SQL file in the remote Git repository." + "description": "Path of the SQL file. Must be relative if the source is a remote Git repository and absolute for workspace paths." + }, + "source": { + "description": "Optional location type of the SQL file. When set to `WORKSPACE`, the SQL file will be retrieved\nfrom the local Databricks workspace. When set to `GIT`, the SQL file will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: SQL file is located in Databricks workspace.\n* `GIT`: SQL file is located in cloud Git provider." } } }, @@ -3776,7 +4559,7 @@ "description": "An optional timeout applied to each run of this job task. A value of `0` means no timeout." }, "webhook_notifications": { - "description": "A collection of system notification IDs to notify when runs of this job begin or complete.", + "description": "A collection of system notification IDs to notify when runs of this task begin or complete. The default behavior is to not send any system notifications.", "properties": { "on_duration_warning_threshold_exceeded": { "description": "An optional list of system notification IDs to call when the duration of a run exceeds the threshold specified for the `RUN_DURATION_SECONDS` metric in the `health` field. 
A maximum of 3 destinations can be specified for the `on_duration_warning_threshold_exceeded` property.", @@ -3811,6 +4594,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -3831,24 +4625,75 @@ "description": "An optional timeout applied to each run of this job. A value of `0` means no timeout." }, "trigger": { - "description": "Trigger settings for the job. Can be used to trigger a run when new files arrive in an external location. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "description": "A configuration to trigger a run when certain conditions are met. The default behavior is that the job runs only when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", "properties": { "file_arrival": { "description": "File arrival trigger settings.", "properties": { "min_time_between_triggers_seconds": { - "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds\n" + "description": "If set, the trigger starts a run only after the specified amount of time passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds" }, "url": { "description": "URL to be monitored for file arrivals. The path must point to the root or a subpath of the external location." }, "wait_after_last_change_seconds": { - "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds.\n" + "description": "If set, the trigger starts a run only after no file activity has occurred for the specified amount of time.\nThis makes it possible to wait for a batch of incoming files to arrive before triggering a run. The\nminimum allowed value is 60 seconds." } } }, "pause_status": { - "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED." + "description": "Whether this trigger is paused or not." + }, + "periodic": { + "description": "Periodic trigger settings.", + "properties": { + "interval": { + "description": "The interval at which the trigger should run." + }, + "unit": { + "description": "The unit of time for the interval." + } + } + }, + "table": { + "description": "Old table trigger settings name. 
Deprecated in favor of `table_update`.", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } + }, + "table_update": { + "description": "", + "properties": { + "condition": { + "description": "The table(s) condition based on which to trigger a job run." + }, + "min_time_between_triggers_seconds": { + "description": "If set, the trigger starts a run only after the specified amount of time has passed since\nthe last time the trigger fired. The minimum allowed value is 60 seconds." + }, + "table_names": { + "description": "A list of Delta tables to monitor for changes. The table name must be in the format `catalog_name.schema_name.table_name`.", + "items": { + "description": "" + } + }, + "wait_after_last_change_seconds": { + "description": "If set, the trigger starts a run only after no table updates have occurred for the specified time\nand can be used to wait for a series of table updates before triggering a run. The\nminimum allowed value is 60 seconds." + } + } } } }, @@ -3888,6 +4733,17 @@ } } }, + "on_streaming_backlog_exceeded": { + "description": "An optional list of system notification IDs to call when any streaming backlog thresholds are exceeded for any stream.\nStreaming backlog thresholds can be set in the `health` field using the following metrics: `STREAMING_BACKLOG_BYTES`, `STREAMING_BACKLOG_RECORDS`, `STREAMING_BACKLOG_SECONDS`, or `STREAMING_BACKLOG_FILES`.\nAlerting is based on the 10-minute average of these metrics. If the issue persists, notifications are resent every 30 minutes.\nA maximum of 3 destinations can be specified for the `on_streaming_backlog_exceeded` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, "on_success": { "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", "items": { @@ -3916,21 +4772,21 @@ "description": "Configuration for Inference Tables which automatically logs requests and responses to Unity Catalog.", "properties": { "catalog_name": { - "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if it was already set." + "description": "The name of the catalog in Unity Catalog. NOTE: On update, you cannot change the catalog name if the inference table is already enabled." }, "enabled": { - "description": "If inference tables are enabled or not. NOTE: If you have already disabled payload logging once, you cannot enable again." + "description": "Indicates whether the inference table is enabled." }, "schema_name": { - "description": "The name of the schema in Unity Catalog. 
NOTE: On update, you cannot change the schema name if it was already set." + "description": "The name of the schema in Unity Catalog. NOTE: On update, you cannot change the schema name if the inference table is already enabled." }, "table_name_prefix": { - "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if it was already set." + "description": "The prefix of the table in Unity Catalog. NOTE: On update, you cannot change the prefix name if the inference table is already enabled." } } }, "served_entities": { - "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 10 served entities.", + "description": "A list of served entities for the endpoint to serve. A serving endpoint can have up to 15 served entities.", "items": { "description": "", "properties": { @@ -3949,99 +4805,103 @@ "external_model": { "description": "The external model to be served. NOTE: Only one of external_model and (entity_name, entity_version, workload_size, workload_type, and scale_to_zero_enabled)\ncan be specified with the latter set being used for custom model serving for a Databricks registered model. When an external_model is present, the served\nentities list can only have one served_entity object. For an existing endpoint with external_model, it can not be updated to an endpoint without external_model.\nIf the endpoint is created without external_model, users cannot update it to add external_model later.\n", "properties": { - "config": { - "description": "The config for the external model, which must match the provider.", + "ai21labs_config": { + "description": "AI21Labs Config. Only required if the provider is 'ai21labs'.", "properties": { - "ai21labs_config": { - "description": "AI21Labs Config", - "properties": { - "ai21labs_api_key": { - "description": "The Databricks secret key reference for an AI21Labs API key." - } - } + "ai21labs_api_key": { + "description": "The Databricks secret key reference for an AI21Labs API key." + } + } + }, + "amazon_bedrock_config": { + "description": "Amazon Bedrock Config. Only required if the provider is 'amazon-bedrock'.", + "properties": { + "aws_access_key_id": { + "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." }, - "anthropic_config": { - "description": "Anthropic Config", - "properties": { - "anthropic_api_key": { - "description": "The Databricks secret key reference for an Anthropic API key." - } - } + "aws_region": { + "description": "The AWS region to use. Bedrock has to be enabled there." }, - "aws_bedrock_config": { - "description": "AWS Bedrock Config", - "properties": { - "aws_access_key_id": { - "description": "The Databricks secret key reference for an AWS Access Key ID with permissions to interact with Bedrock services." - }, - "aws_region": { - "description": "The AWS region to use. Bedrock has to be enabled there." - }, - "aws_secret_access_key": { - "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." - }, - "bedrock_provider": { - "description": "The underlying provider in AWS Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." 
- } - } + "aws_secret_access_key": { + "description": "The Databricks secret key reference for an AWS Secret Access Key paired with the access key ID, with permissions to interact with Bedrock services." }, - "cohere_config": { - "description": "Cohere Config", - "properties": { - "cohere_api_key": { - "description": "The Databricks secret key reference for a Cohere API key." - } - } + "bedrock_provider": { + "description": "The underlying provider in Amazon Bedrock. Supported values (case insensitive) include: Anthropic, Cohere, AI21Labs, Amazon." + } + } + }, + "anthropic_config": { + "description": "Anthropic Config. Only required if the provider is 'anthropic'.", + "properties": { + "anthropic_api_key": { + "description": "The Databricks secret key reference for an Anthropic API key." + } + } + }, + "cohere_config": { + "description": "Cohere Config. Only required if the provider is 'cohere'.", + "properties": { + "cohere_api_key": { + "description": "The Databricks secret key reference for a Cohere API key." + } + } + }, + "databricks_model_serving_config": { + "description": "Databricks Model Serving Config. Only required if the provider is 'databricks-model-serving'.", + "properties": { + "databricks_api_token": { + "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" }, - "databricks_model_serving_config": { - "description": "Databricks Model Serving Config", - "properties": { - "databricks_api_token": { - "description": "The Databricks secret key reference for a Databricks API token that corresponds to a user or service\nprincipal with Can Query access to the model serving endpoint pointed to by this external model.\n" - }, - "databricks_workspace_url": { - "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" - } - } - }, - "openai_config": { - "description": "OpenAI Config", - "properties": { - "openai_api_base": { - "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" - }, - "openai_api_key": { - "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." - }, - "openai_api_type": { - "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" - }, - "openai_api_version": { - "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" - }, - "openai_deployment_name": { - "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" - }, - "openai_organization": { - "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" - } - } - }, - "palm_config": { - "description": "PaLM Config", - "properties": { - "palm_api_key": { - "description": "The Databricks secret key reference for a PaLM API key." 
- } - } + "databricks_workspace_url": { + "description": "The URL of the Databricks workspace containing the model serving endpoint pointed to by this external model.\n" } } }, "name": { "description": "The name of the external model." }, + "openai_config": { + "description": "OpenAI Config. Only required if the provider is 'openai'.", + "properties": { + "microsoft_entra_client_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Client ID.\n" + }, + "microsoft_entra_client_secret": { + "description": "The Databricks secret key reference for the Microsoft Entra Client Secret that is\nonly required for Azure AD OpenAI.\n" + }, + "microsoft_entra_tenant_id": { + "description": "This field is only required for Azure AD OpenAI and is the Microsoft Entra Tenant ID.\n" + }, + "openai_api_base": { + "description": "This is the base URL for the OpenAI API (default: \"https://api.openai.com/v1\").\nFor Azure OpenAI, this field is required, and is the base URL for the Azure OpenAI API service\nprovided by Azure.\n" + }, + "openai_api_key": { + "description": "The Databricks secret key reference for an OpenAI or Azure OpenAI API key." + }, + "openai_api_type": { + "description": "This is an optional field to specify the type of OpenAI API to use.\nFor Azure OpenAI, this field is required, and adjust this parameter to represent the preferred security\naccess validation protocol. For access token validation, use azure. For authentication using Azure Active\nDirectory (Azure AD) use, azuread.\n" + }, + "openai_api_version": { + "description": "This is an optional field to specify the OpenAI API version.\nFor Azure OpenAI, this field is required, and is the version of the Azure OpenAI service to\nutilize, specified by a date.\n" + }, + "openai_deployment_name": { + "description": "This field is only required for Azure OpenAI and is the name of the deployment resource for the\nAzure OpenAI service.\n" + }, + "openai_organization": { + "description": "This is an optional field to specify the organization in OpenAI or Azure OpenAI.\n" + } + } + }, + "palm_config": { + "description": "PaLM Config. Only required if the provider is 'palm'.", + "properties": { + "palm_api_key": { + "description": "The Databricks secret key reference for a PaLM API key." + } + } + }, "provider": { - "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'aws-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" + "description": "The name of the provider for the external model. Currently, the supported providers are 'ai21labs', 'anthropic',\n'amazon-bedrock', 'cohere', 'databricks-model-serving', 'openai', and 'palm'.\",\n" }, "task": { "description": "The task type of the external model." @@ -4051,6 +4911,12 @@ "instance_profile_arn": { "description": "ARN of the instance profile that the served entity uses to access AWS resources." }, + "max_provisioned_throughput": { + "description": "The maximum tokens per second that the endpoint can scale up to." + }, + "min_provisioned_throughput": { + "description": "The minimum tokens per second that the endpoint can scale down to." + }, "name": { "description": "The name of a served entity. It must be unique across an endpoint. A served entity name can consist of alphanumeric characters, dashes, and underscores.\nIf not specified for an external model, this field defaults to external_model.name, with '.' 
and ':' replaced with '-', and if not specified for other\nentities, it defaults to \u003centity-name\u003e-\u003centity-version\u003e.\n" }, @@ -4067,7 +4933,7 @@ } }, "served_models": { - "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 10 served models.", + "description": "(Deprecated, use served_entities instead) A list of served models for the endpoint to serve. A serving endpoint can have up to 15 served models.", "items": { "description": "", "properties": { @@ -4081,7 +4947,7 @@ "description": "ARN of the instance profile that the served model will use to access AWS resources." }, "model_name": { - "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model, \nin the form of __catalog_name__.__schema_name__.__model_name__.\n" + "description": "The name of the model in Databricks Model Registry to be served or if the model resides in Unity Catalog, the full name of model,\nin the form of __catalog_name__.__schema_name__.__model_name__.\n" }, "model_version": { "description": "The version of the model in Databricks Model Registry or Unity Catalog to be served." @@ -4162,6 +5028,9 @@ } } }, + "route_optimized": { + "description": "Enable route optimization for the serving endpoint." + }, "tags": { "description": "Tags to be attached to the serving endpoint and automatically propagated to billing logs.", "items": { @@ -4317,10 +5186,13 @@ "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", "properties": { "max_workers": { - "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + "description": "The maximum number of workers to which the cluster can scale up when overloaded. `max_workers` must be strictly greater than `min_workers`." }, "min_workers": { - "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + "description": "The minimum number of workers the cluster can scale down to when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + }, + "mode": { + "description": "Databricks Enhanced Autoscaling optimizes cluster utilization by automatically\nallocating cluster resources based on workload volume, with minimal impact to\nthe data processing latency of your pipelines. Enhanced Autoscaling is available\nfor `updates` clusters only. The legacy autoscaling feature is used for `maintenance`\nclusters.\n" } } }, @@ -4334,13 +5206,13 @@ "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." }, "ebs_volume_iops": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what IOPS to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_size": { "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." }, "ebs_volume_throughput": { - "description": "\u003cneeds content added\u003e" + "description": "If using gp3 volumes, what throughput to use for the disk. If this is not set, the maximum performance of a gp2 volume with the same volume size will be used." }, "ebs_volume_type": { "description": "" @@ -4449,6 +5321,94 @@ }, "local_ssd_count": { "description": "If provided, each node (workers and driver) in the cluster will have this number of local SSDs attached. Each local SSD is 375GB in size. Refer to [GCP documentation](https://cloud.google.com/compute/docs/disks/local-ssd#choose_number_local_ssds) for the supported number of local SSDs for each instance type." + }, + "use_preemptible_executors": { + "description": "This field determines whether the spark executors will be scheduled to run on preemptible VMs (when set to true) versus standard compute engine VMs (when set to false; default).\nNote: Soon to be deprecated, use the availability field instead." + }, + "zone_id": { + "description": "Identifier for the availability zone in which the cluster resides.\nThis can be one of the following:\n- \"HA\" =\u003e High availability, spread nodes across availability zones for a Databricks deployment region [default]\n- \"AUTO\" =\u003e Databricks picks an availability zone to schedule the cluster on.\n- A GCP availability zone =\u003e Pick One of the available zones for (machine type + region) from https://cloud.google.com/compute/docs/regions-zones." + } + } + }, + "init_scripts": { + "description": "The configuration for storing init scripts. Any number of destinations can be specified. The scripts are executed sequentially in the order provided. If `cluster_log_conf` is specified, init script logs are sent to `\u003cdestination\u003e/\u003ccluster-ID\u003e/init_scripts`.", + "items": { + "description": "", + "properties": { + "abfss": { + "description": "destination needs to be provided. e.g.\n`{ \"abfss\" : { \"destination\" : \"abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e\" } }", + "properties": { + "destination": { + "description": "abfss destination, e.g. `abfss://\u003ccontainer-name\u003e@\u003cstorage-account-name\u003e.dfs.core.windows.net/\u003cdirectory-name\u003e`." 
+ } + } + }, + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "file": { + "description": "destination needs to be provided. e.g.\n`{ \"file\" : { \"destination\" : \"file:/my/local/file.sh\" } }`", + "properties": { + "destination": { + "description": "local file destination, e.g. `file:/my/local/file.sh`" + } + } + }, + "gcs": { + "description": "destination needs to be provided. e.g.\n`{ \"gcs\": { \"destination\": \"gs://my-bucket/file.sh\" } }`", + "properties": { + "destination": { + "description": "GCS destination/URI, e.g. `gs://my-bucket/some-prefix`" + } + } + }, + "s3": { + "description": "destination and either the region or endpoint need to be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + }, + "volumes": { + "description": "destination needs to be provided. e.g.\n`{ \"volumes\" : { \"destination\" : \"/Volumes/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh`" + } + } + }, + "workspace": { + "description": "destination needs to be provided. e.g.\n`{ \"workspace\" : { \"destination\" : \"/Users/user1@databricks.com/my-init.sh\" } }`", + "properties": { + "destination": { + "description": "workspace files destination, e.g. 
`/Users/user1@databricks.com/my-init.sh`" + } + } + } } } }, @@ -4497,6 +5457,17 @@ "continuous": { "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." }, + "deployment": { + "description": "Deployment type of this pipeline.", + "properties": { + "kind": { + "description": "The deployment method that manages the pipeline." + }, + "metadata_file_path": { + "description": "The path to the file containing metadata about the deployment." + } + } + }, "development": { "description": "Whether the pipeline is in Development mode. Defaults to false." }, @@ -4520,9 +5491,136 @@ } } }, + "gateway_definition": { + "description": "The definition of a gateway pipeline to support CDC.", + "properties": { + "connection_id": { + "description": "Immutable. The Unity Catalog connection this gateway pipeline uses to communicate with the source." + }, + "gateway_storage_catalog": { + "description": "Required, Immutable. The name of the catalog for the gateway pipeline's storage location." + }, + "gateway_storage_name": { + "description": "Required. The Unity Catalog-compatible naming for the gateway storage location.\nThis is the destination to use for the data that is extracted by the gateway.\nDelta Live Tables system will automatically create the storage location under the catalog and schema.\n" + }, + "gateway_storage_schema": { + "description": "Required, Immutable. The name of the schema for the gateway pipelines's storage location." + } + } + }, "id": { "description": "Unique identifier for this pipeline." }, + "ingestion_definition": { + "description": "The configuration for a managed ingestion pipeline. These settings cannot be used with the 'libraries', 'target' or 'catalog' settings.", + "properties": { + "connection_name": { + "description": "Immutable. The Unity Catalog connection this ingestion pipeline uses to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "ingestion_gateway_id": { + "description": "Immutable. Identifier for the ingestion gateway used by this ingestion pipeline to communicate with the source. Specify either ingestion_gateway_id or connection_name." + }, + "objects": { + "description": "Required. Settings specifying tables to replicate and the destination for the replicated tables.", + "items": { + "description": "", + "properties": { + "schema": { + "description": "Select tables from a specific source schema.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store tables." + }, + "destination_schema": { + "description": "Required. Destination schema to store tables in. Tables with the same name as the source tables are created in this destination schema. The pipeline fails If a table with the same name already exists." + }, + "source_catalog": { + "description": "The source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Required. Schema name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. 
These settings are applied to all tables in this schema and override the table_configuration defined in the ManagedIngestionPipelineDefinition object.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + }, + "table": { + "description": "Select tables from a specific source table.", + "properties": { + "destination_catalog": { + "description": "Required. Destination catalog to store table." + }, + "destination_schema": { + "description": "Required. Destination schema to store table." + }, + "destination_table": { + "description": "Optional. Destination table name. The pipeline fails If a table with that name already exists. If not set, the source table name is used." + }, + "source_catalog": { + "description": "Source catalog name. Might be optional depending on the type of source." + }, + "source_schema": { + "description": "Schema name in the source database. Might be optional depending on the type of source." + }, + "source_table": { + "description": "Required. Table name in the source database." + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings override the table_configuration defined in the ManagedIngestionPipelineDefinition object and the SchemaSpec.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + } + } + } + }, + "table_configuration": { + "description": "Configuration settings to control the ingestion of tables. These settings are applied to all tables in the pipeline.", + "properties": { + "primary_keys": { + "description": "The primary key of the table used to apply changes.", + "items": { + "description": "" + } + }, + "salesforce_include_formula_fields": { + "description": "If true, formula fields defined in the table are included in the ingestion. This setting is only valid for the Salesforce connector" + }, + "scd_type": { + "description": "The SCD type to use to ingest the table." + } + } + } + } + }, "libraries": { "description": "Libraries or code needed by this deployment.", "items": { @@ -4557,7 +5655,7 @@ } }, "notebook": { - "description": "The path to a notebook that defines a pipeline and is stored in the \u003cDatabricks\u003e workspace.\n", + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\n", "properties": { "path": { "description": "The absolute path of the notebook." 
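For orientation, the ingestion_definition block above roughly corresponds to a payload like the following sketch. It is written as a generic Go map so no SDK struct names are assumed, and every concrete value (connection name, catalogs, schemas, SCD type) is a placeholder:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// Sketch of an ingestion_definition payload; keys mirror the schema fields
	// described above, and all concrete values are hypothetical.
	ingestion := map[string]any{
		// Specify either connection_name or ingestion_gateway_id, not both.
		"connection_name": "my_salesforce_connection",
		"objects": []any{
			map[string]any{
				"schema": map[string]any{
					"source_schema":       "sales",
					"destination_catalog": "main",
					"destination_schema":  "sales_replicated",
				},
			},
		},
		// Applied to all tables in the pipeline unless overridden per schema/table.
		"table_configuration": map[string]any{
			"primary_keys": []any{"id"},
			"scd_type":     "SCD_TYPE_1", // illustrative value only
		},
	}

	out, _ := json.MarshalIndent(ingestion, "", "  ")
	fmt.Println(string(out))
}
```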
@@ -4644,6 +5742,156 @@ } } }, + "quality_monitors": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "assets_dir": { + "description": "" + }, + "baseline_table_name": { + "description": "" + }, + "custom_metrics": { + "description": "", + "items": { + "description": "", + "properties": { + "definition": { + "description": "" + }, + "input_columns": { + "description": "", + "items": { + "description": "" + } + }, + "name": { + "description": "" + }, + "output_data_type": { + "description": "" + }, + "type": { + "description": "" + } + } + } + }, + "data_classification_config": { + "description": "", + "properties": { + "enabled": { + "description": "" + } + } + }, + "inference_log": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "label_col": { + "description": "" + }, + "model_id_col": { + "description": "" + }, + "prediction_col": { + "description": "" + }, + "prediction_proba_col": { + "description": "" + }, + "problem_type": { + "description": "" + }, + "timestamp_col": { + "description": "" + } + } + }, + "notifications": { + "description": "", + "properties": { + "on_failure": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + }, + "on_new_classification_tag_detected": { + "description": "", + "properties": { + "email_addresses": { + "description": "", + "items": { + "description": "" + } + } + } + } + } + }, + "output_schema_name": { + "description": "" + }, + "schedule": { + "description": "", + "properties": { + "pause_status": { + "description": "" + }, + "quartz_cron_expression": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "skip_builtin_dashboard": { + "description": "" + }, + "slicing_exprs": { + "description": "", + "items": { + "description": "" + } + }, + "snapshot": { + "description": "" + }, + "time_series": { + "description": "", + "properties": { + "granularities": { + "description": "", + "items": { + "description": "" + } + }, + "timestamp_col": { + "description": "" + } + } + }, + "warehouse_id": { + "description": "" + } + } + } + }, "registered_models": { "description": "List of Registered Models", "additionalproperties": { @@ -4717,14 +5965,60 @@ "variables": { "description": "", "additionalproperties": { - "description": "" + "description": "", + "properties": { + "default": { + "description": "" + }, + "description": { + "description": "" + }, + "lookup": { + "description": "", + "properties": { + "alert": { + "description": "" + }, + "cluster": { + "description": "" + }, + "cluster_policy": { + "description": "" + }, + "dashboard": { + "description": "" + }, + "instance_pool": { + "description": "" + }, + "job": { + "description": "" + }, + "metastore": { + "description": "" + }, + "pipeline": { + "description": "" + }, + "query": { + "description": "" + }, + "service_principal": { + "description": "" + }, + "warehouse": { + "description": "" + } + } + } + } } }, "workspace": { - "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", + "description": "", "properties": { "artifact_path": { - "description": "The remote path to synchronize build artifacts to. 
This defaults to `${workspace.root}/artifacts`" + "description": "" }, "auth_type": { "description": "" @@ -4733,10 +6027,10 @@ "description": "" }, "azure_environment": { - "description": "Azure environment, one of (Public, UsGov, China, Germany)." + "description": "" }, "azure_login_app_id": { - "description": "Azure Login Application ID." + "description": "" }, "azure_tenant_id": { "description": "" @@ -4745,28 +6039,28 @@ "description": "" }, "azure_workspace_resource_id": { - "description": "Azure Resource Manager ID for Azure Databricks workspace." + "description": "" }, "client_id": { "description": "" }, "file_path": { - "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" + "description": "" }, "google_service_account": { "description": "" }, "host": { - "description": "Host url of the workspace." + "description": "" }, "profile": { - "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." + "description": "" }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" + "description": "" }, "state_path": { - "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" + "description": "" } } } @@ -4826,10 +6120,10 @@ } }, "workspace": { - "description": "Configures which workspace to connect to and locations for files, state, and similar locations within the workspace file tree.", + "description": "", "properties": { "artifact_path": { - "description": "The remote path to synchronize build artifacts to. This defaults to `${workspace.root}/artifacts`" + "description": "" }, "auth_type": { "description": "" @@ -4838,10 +6132,10 @@ "description": "" }, "azure_environment": { - "description": "Azure environment, one of (Public, UsGov, China, Germany)." + "description": "" }, "azure_login_app_id": { - "description": "Azure Login Application ID." + "description": "" }, "azure_tenant_id": { "description": "" @@ -4850,28 +6144,28 @@ "description": "" }, "azure_workspace_resource_id": { - "description": "Azure Resource Manager ID for Azure Databricks workspace." + "description": "" }, "client_id": { "description": "" }, "file_path": { - "description": "The remote path to synchronize local files artifacts to. This defaults to `${workspace.root}/files`" + "description": "" }, "google_service_account": { "description": "" }, "host": { - "description": "Host url of the workspace." + "description": "" }, "profile": { - "description": "Connection profile to use. By default profiles are specified in ~/.databrickscfg." + "description": "" }, "root_path": { - "description": "The base location for synchronizing files, artifacts and state. Defaults to `/Users/jane@doe.com/.bundle/${bundle.name}/${bundle.target}`" + "description": "" }, "state_path": { - "description": "The remote path to synchronize bundle state to. This defaults to `${workspace.root}/state`" + "description": "" } } } diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go index 0b64c43e3..1756d5165 100644 --- a/bundle/schema/openapi.go +++ b/bundle/schema/openapi.go @@ -10,17 +10,21 @@ import ( ) type OpenapiReader struct { + // OpenAPI spec to read schemas from. OpenapiSpec *openapi.Specification - Memo map[string]*jsonschema.Schema + + // In-memory cache of schemas read from the OpenAPI spec. 
+ memo map[string]jsonschema.Schema } const SchemaPathPrefix = "#/components/schemas/" -func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, error) { +// Read a schema directly from the OpenAPI spec. +func (reader *OpenapiReader) readOpenapiSchema(path string) (jsonschema.Schema, error) { schemaKey := strings.TrimPrefix(path, SchemaPathPrefix) // return early if we already have a computed schema - memoSchema, ok := reader.Memo[schemaKey] + memoSchema, ok := reader.memo[schemaKey] if ok { return memoSchema, nil } @@ -28,18 +32,18 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, // check path is present in openapi spec openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey] if !ok { - return nil, fmt.Errorf("schema with path %s not found in openapi spec", path) + return jsonschema.Schema{}, fmt.Errorf("schema with path %s not found in openapi spec", path) } // convert openapi schema to the native schema struct bytes, err := json.Marshal(*openapiSchema) if err != nil { - return nil, err + return jsonschema.Schema{}, err } - jsonSchema := &jsonschema.Schema{} - err = json.Unmarshal(bytes, jsonSchema) + jsonSchema := jsonschema.Schema{} + err = json.Unmarshal(bytes, &jsonSchema) if err != nil { - return nil, err + return jsonschema.Schema{}, err } // A hack to convert a map[string]interface{} to *Schema @@ -49,28 +53,40 @@ func (reader *OpenapiReader) readOpenapiSchema(path string) (*jsonschema.Schema, if ok { b, err := json.Marshal(jsonSchema.AdditionalProperties) if err != nil { - return nil, err + return jsonschema.Schema{}, err } additionalProperties := &jsonschema.Schema{} err = json.Unmarshal(b, additionalProperties) if err != nil { - return nil, err + return jsonschema.Schema{}, err } jsonSchema.AdditionalProperties = additionalProperties } // store read schema into memo - reader.Memo[schemaKey] = jsonSchema + reader.memo[schemaKey] = jsonSchema return jsonSchema, nil } -// safe againt loops in refs +// Resolve all nested "$ref" references in the schema. This function unrolls a single +// level of "$ref" in the schema and calls into traverseSchema to resolve nested references. +// Thus this function and traverseSchema are mutually recursive. +// +// This function is safe against reference loops. If a reference loop is detected, an error +// is returned. func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { if root.Reference == nil { return reader.traverseSchema(root, tracker) } key := *root.Reference + + // HACK to unblock CLI release (13th Feb 2024). This is temporary until proper + // support for recursive types is added to the docs generator. PR: https://github.com/databricks/cli/pull/1204 + if strings.Contains(key, "ForEachTask") { + return root, nil + } + if tracker.hasCycle(key) { // self reference loops can be supported however the logic is non-trivial because // cross refernce loops are not allowed (see: http://json-schema.org/understanding-json-schema/structuring.html#recursion) @@ -84,12 +100,12 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t // in the memo root.Reference = nil - // unroll one level of reference + // unroll one level of reference. 
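Stepping outside the hunk for a moment, the refactored reader is exercised much like the updated tests further down: construct it with a spec and an empty value-typed memo, then ask for a fully resolved schema. A minimal sketch (the inline spec is invented, and this only compiles inside the bundle/schema package because memo is unexported):

```go
func TestResolveExampleSchema(t *testing.T) {
	// Hypothetical inline spec; "fruits" references "foo" via $ref.
	specString := `{
	  "components": {
	    "schemas": {
	      "foo":    {"type": "string"},
	      "fruits": {"type": "object", "properties": {"guava": {"$ref": "#/components/schemas/foo"}}}
	    }
	  }
	}`

	spec := &openapi.Specification{}
	require.NoError(t, json.Unmarshal([]byte(specString), spec))

	reader := &OpenapiReader{
		OpenapiSpec: spec,
		memo:        make(map[string]jsonschema.Schema), // value schemas, not pointers
	}

	// readResolvedSchema inlines every $ref; "guava" resolves to {"type": "string"}
	// while descriptions on the referencing property are preserved.
	resolved, err := reader.readResolvedSchema("#/components/schemas/fruits")
	require.NoError(t, err)
	require.NotNil(t, resolved)
}
```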
selfRef, err := reader.readOpenapiSchema(ref) if err != nil { return nil, err } - root = selfRef + root = &selfRef root.Description = description // traverse again to find new references @@ -101,6 +117,8 @@ func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *t return root, err } +// Traverse the nested properties of the schema to resolve "$ref" references. This function +// and safeResolveRefs are mutually recursive. func (reader *OpenapiReader) traverseSchema(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) { // case primitive (or invalid) if root.Type != jsonschema.ObjectType && root.Type != jsonschema.ArrayType { @@ -147,11 +165,11 @@ func (reader *OpenapiReader) readResolvedSchema(path string) (*jsonschema.Schema } tracker := newTracker() tracker.push(path, path) - root, err = reader.safeResolveRefs(root, tracker) + resolvedRoot, err := reader.safeResolveRefs(&root, tracker) if err != nil { return nil, tracker.errWithTrace(err.Error(), "") } - return root, nil + return resolvedRoot, nil } func (reader *OpenapiReader) jobsDocs() (*Docs, error) { diff --git a/bundle/schema/openapi_test.go b/bundle/schema/openapi_test.go index 0d71fa440..359b1e58a 100644 --- a/bundle/schema/openapi_test.go +++ b/bundle/schema/openapi_test.go @@ -48,7 +48,7 @@ func TestReadSchemaForObject(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -106,7 +106,7 @@ func TestReadSchemaForArray(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -152,7 +152,7 @@ func TestReadSchemaForMap(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -201,7 +201,7 @@ func TestRootReferenceIsResolved(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -251,7 +251,7 @@ func TestSelfReferenceLoopErrors(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -285,7 +285,7 @@ func TestCrossReferenceLoopErrors(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -330,7 +330,7 @@ func TestReferenceResolutionForMapInObject(t *testing.T) { spec := &openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -400,7 +400,7 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) { spec := 
&openapi.Specification{} reader := &OpenapiReader{ OpenapiSpec: spec, - Memo: make(map[string]*jsonschema.Schema), + memo: make(map[string]jsonschema.Schema), } err := json.Unmarshal([]byte(specString), spec) require.NoError(t, err) @@ -434,3 +434,61 @@ func TestReferenceResolutionForArrayInObject(t *testing.T) { t.Log("[DEBUG] expected: ", expected) assert.Equal(t, expected, string(fruitsSchemaJson)) } + +func TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "number" + }, + "fruits": { + "type": "object", + "properties": { + "guava": { + "type": "object", + "description": "Guava is a fruit", + "$ref": "#/components/schemas/foo" + }, + "mango": { + "type": "object", + "description": "What is a mango?", + "$ref": "#/components/schemas/foo" + } + } + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + memo: make(map[string]jsonschema.Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "properties": { + "guava": { + "type": "number", + "description": "Guava is a fruit" + }, + "mango": { + "type": "number", + "description": "What is a mango?" + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index 8b5c36d12..ac0b4f2ec 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -6,6 +6,7 @@ import ( "reflect" "strings" + "github.com/databricks/cli/libs/dyn/dynvar" "github.com/databricks/cli/libs/jsonschema" ) @@ -92,6 +93,14 @@ func jsonSchemaType(golangType reflect.Type) (jsonschema.Type, error) { // // - tracker: Keeps track of types / traceIds seen during recursive traversal func safeToSchema(golangType reflect.Type, docs *Docs, traceId string, tracker *tracker) (*jsonschema.Schema, error) { + // HACK to unblock CLI release (13th Feb 2024). This is temporary until proper + // support for recursive types is added to the schema generator. PR: https://github.com/databricks/cli/pull/1204 + if traceId == "for_each_task" { + return &jsonschema.Schema{ + Type: jsonschema.ObjectType, + }, nil + } + // WE ERROR OUT IF THERE ARE CYCLES IN THE JSON SCHEMA // There are mechanisms to deal with cycles though recursive identifiers in json // schema. However if we use them, we would need to make sure we are able to detect @@ -159,6 +168,22 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschem } jsonSchema := &jsonschema.Schema{Type: rootJavascriptType} + // If the type is a non-string primitive, then we allow it to be a string + // provided it's a pure variable reference (ie only a single variable reference). 
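As a small sketch of what the variable-reference branch added just below accepts: the string alternative is constrained by the exported dynvar.VariableRegex, so a plain string with no ${...} reference still fails validation for numeric and boolean fields.

```go
package main

import (
	"fmt"
	"regexp"

	"github.com/databricks/cli/libs/dyn/dynvar"
)

func main() {
	// The generated schema's string alternative uses this pattern verbatim.
	re := regexp.MustCompile(dynvar.VariableRegex)

	fmt.Println(re.MatchString("${var.num_workers}"))     // true: variable reference
	fmt.Println(re.MatchString("${resources.jobs.x.id}")) // true: nested paths are allowed
	fmt.Println(re.MatchString("not a reference"))        // false: rejected for number/bool fields
}
```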
+ if rootJavascriptType == jsonschema.BooleanType || rootJavascriptType == jsonschema.NumberType { + jsonSchema = &jsonschema.Schema{ + AnyOf: []*jsonschema.Schema{ + { + Type: rootJavascriptType, + }, + { + Type: jsonschema.StringType, + Pattern: dynvar.VariableRegex, + }, + }, + } + } + if docs != nil { jsonSchema.Description = docs.Description } diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index d44a2082a..6d9df0cc7 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -14,7 +14,15 @@ func TestIntSchema(t *testing.T) { expected := `{ - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }` schema, err := New(reflect.TypeOf(elemInt), nil) @@ -33,7 +41,15 @@ func TestBooleanSchema(t *testing.T) { expected := `{ - "type": "boolean" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }` schema, err := New(reflect.TypeOf(elem), nil) @@ -101,46 +117,150 @@ func TestStructOfPrimitivesSchema(t *testing.T) { "type": "object", "properties": { "bool_val": { - "type": "boolean" + "anyOf": [ + { + "type": "boolean" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "float32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "float64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "int16_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "int32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "int64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "int8_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "int_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "string_val": { "type": "string" }, "uint16_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "uint32_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "uint64_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "uint8_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "uint_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -200,7 +320,15 @@ func TestStructOfStructsSchema(t *testing.T) { "type": "object", "properties": { "a": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "b": { "type": "string" @@ -257,7 +385,15 @@ func TestStructOfMapsSchema(t *testing.T) { "my_map": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } } }, @@ -339,7 +475,15 @@ func TestMapOfPrimitivesSchema(t *testing.T) { `{ "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }` @@ -368,7 +512,15 @@ func TestMapOfStructSchema(t *testing.T) { "type": "object", "properties": { "my_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -398,7 +550,15 @@ func TestMapOfMapSchema(t *testing.T) { "additionalProperties": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } } }` @@ -495,7 +655,15 @@ func TestSliceOfMapSchema(t *testing.T) { "items": { "type": "object", "additionalProperties": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } } }` @@ -525,7 +693,15 @@ func TestSliceOfStructSchema(t *testing.T) { "type": "object", "properties": { "my_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -575,7 +751,15 @@ func TestEmbeddedStructSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": 
"\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "country": { "type": "string" @@ -607,7 +791,15 @@ func TestEmbeddedStructSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "home": { "type": "object", @@ -694,7 +886,15 @@ func TestNonAnnotatedFieldsAreSkipped(t *testing.T) { "type": "object", "properties": { "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -728,7 +928,15 @@ func TestDashFieldsAreSkipped(t *testing.T) { "type": "object", "properties": { "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -773,7 +981,15 @@ func TestPointerInStructSchema(t *testing.T) { "type": "object", "properties": { "ptr_val2": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -782,13 +998,29 @@ func TestPointerInStructSchema(t *testing.T) { ] }, "float_val": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "ptr_bar": { "type": "object", "properties": { "ptr_val2": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -797,7 +1029,15 @@ func TestPointerInStructSchema(t *testing.T) { ] }, "ptr_int": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "ptr_string": { "type": "string" @@ -860,7 +1100,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "name": { "type": "string" @@ -875,7 +1123,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "name": { "type": "string" @@ -895,7 +1151,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + 
] }, "name": { "type": "string" @@ -910,7 +1174,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "name": { "type": "string" @@ -932,7 +1204,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "name": { "type": "string" @@ -950,7 +1230,15 @@ func TestGenericSchema(t *testing.T) { "type": "object", "properties": { "age": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "name": { "type": "string" @@ -1028,16 +1316,40 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "bar": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "papaya": { "type": "object", "properties": { "a": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "b": { "type": "string" @@ -1111,7 +1423,15 @@ func TestDocIngestionForObject(t *testing.T) { "description": "docs for a" }, "b": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -1185,12 +1505,28 @@ func TestDocIngestionForSlice(t *testing.T) { "type": "object", "properties": { "guava": { - "type": "number", - "description": "docs for guava" + "description": "docs for guava", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "pineapple": { - "type": "number", - "description": "docs for pineapple" + "description": "docs for pineapple", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -1268,12 +1604,28 @@ func TestDocIngestionForMap(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number", - "description": "docs for apple" + "description": "docs for apple", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "mango": { - "type": "number", - "description": "docs for mango" + "description": "docs for mango", + "anyOf": [ + { + 
"type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -1324,8 +1676,16 @@ func TestDocIngestionForTopLevelPrimitive(t *testing.T) { "description": "docs for root", "properties": { "my_val": { - "type": "number", - "description": "docs for my val" + "description": "docs for my val", + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] } }, "additionalProperties": false, @@ -1395,7 +1755,15 @@ func TestInterfaceGeneratesEmptySchema(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "mango": {} }, @@ -1436,7 +1804,15 @@ func TestBundleReadOnlytag(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "pokemon": { "type": "object", @@ -1488,7 +1864,15 @@ func TestBundleInternalTag(t *testing.T) { "type": "object", "properties": { "apple": { - "type": "number" + "anyOf": [ + { + "type": "number" + }, + { + "type": "string", + "pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}" + } + ] }, "pokemon": { "type": "object", diff --git a/bundle/scripts/scripts.go b/bundle/scripts/scripts.go index 2f13bc19f..629b3a8ab 100644 --- a/bundle/scripts/scripts.go +++ b/bundle/scripts/scripts.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/exec" "github.com/databricks/cli/libs/log" ) @@ -28,15 +29,15 @@ func (m *script) Name() string { return fmt.Sprintf("scripts.%s", m.scriptHook) } -func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { - executor, err := exec.NewCommandExecutor(b.Config.Path) +func (m *script) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + executor, err := exec.NewCommandExecutor(b.RootPath) if err != nil { - return err + return diag.FromErr(err) } cmd, out, err := executeHook(ctx, executor, b, m.scriptHook) if err != nil { - return err + return diag.FromErr(fmt.Errorf("failed to execute script: %w", err)) } if cmd == nil { log.Debugf(ctx, "No script defined for %s, skipping", m.scriptHook) @@ -52,7 +53,12 @@ func (m *script) Apply(ctx context.Context, b *bundle.Bundle) error { line, err = reader.ReadString('\n') } - return cmd.Wait() + err = cmd.Wait() + if err != nil { + return diag.FromErr(fmt.Errorf("failed to execute script: %w", err)) + } + + return nil } func executeHook(ctx context.Context, executor *exec.Executor, b *bundle.Bundle, hook config.ScriptHook) (exec.Command, io.Reader, error) { diff --git a/bundle/scripts/scripts_test.go b/bundle/scripts/scripts_test.go index a8835b599..1bc216b61 100644 --- a/bundle/scripts/scripts_test.go +++ b/bundle/scripts/scripts_test.go @@ -23,7 +23,7 @@ func TestExecutesHook(t *testing.T) { }, } - executor, err := 
exec.NewCommandExecutor(b.Config.Path) + executor, err := exec.NewCommandExecutor(b.RootPath) require.NoError(t, err) _, out, err := executeHook(context.Background(), executor, b, config.ScriptPreBuild) require.NoError(t, err) @@ -34,3 +34,18 @@ func TestExecutesHook(t *testing.T) { require.NoError(t, err) require.Equal(t, "Hello", strings.TrimSpace(line)) } + +func TestExecuteMutator(t *testing.T) { + b := &bundle.Bundle{ + Config: config.Root{ + Experimental: &config.Experimental{ + Scripts: map[config.ScriptHook]config.Command{ + config.ScriptPreBuild: "echo 'Hello'", + }, + }, + }, + } + + diags := bundle.Apply(context.Background(), b, Execute(config.ScriptPreInit)) + require.NoError(t, diags.Error()) +} diff --git a/bundle/seq.go b/bundle/seq.go index 89e760d1f..c1260a3f0 100644 --- a/bundle/seq.go +++ b/bundle/seq.go @@ -1,6 +1,10 @@ package bundle -import "context" +import ( + "context" + + "github.com/databricks/cli/libs/diag" +) type seqMutator struct { mutators []Mutator @@ -10,14 +14,15 @@ func (s *seqMutator) Name() string { return "seq" } -func (s *seqMutator) Apply(ctx context.Context, b *Bundle) error { +func (s *seqMutator) Apply(ctx context.Context, b *Bundle) diag.Diagnostics { + var diags diag.Diagnostics for _, m := range s.mutators { - err := Apply(ctx, b, m) - if err != nil { - return err + diags = diags.Extend(Apply(ctx, b, m)) + if diags.HasError() { + break } } - return nil + return diags } func Seq(ms ...Mutator) Mutator { diff --git a/bundle/seq_test.go b/bundle/seq_test.go index d5c229e3c..74f975ed8 100644 --- a/bundle/seq_test.go +++ b/bundle/seq_test.go @@ -14,8 +14,8 @@ func TestSeqMutator(t *testing.T) { seqMutator := Seq(m1, m2, m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -30,8 +30,8 @@ func TestSeqWithDeferredMutator(t *testing.T) { seqMutator := Seq(m1, Defer(m2, m3), m4) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.NoError(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.NoError(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) @@ -47,8 +47,8 @@ func TestSeqWithErrorAndDeferredMutator(t *testing.T) { seqMutator := Seq(errorMut, Defer(m1, m2), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, errorMut.applyCalled) assert.Equal(t, 0, m1.applyCalled) @@ -64,8 +64,8 @@ func TestSeqWithErrorInsideDeferredMutator(t *testing.T) { seqMutator := Seq(m1, Defer(errorMut, m2), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, errorMut.applyCalled) @@ -81,8 +81,8 @@ func TestSeqWithErrorInsideFinallyStage(t *testing.T) { seqMutator := Seq(m1, Defer(m2, errorMut), m3) b := &Bundle{} - err := Apply(context.Background(), b, seqMutator) - assert.Error(t, err) + diags := Apply(context.Background(), b, seqMutator) + assert.Error(t, diags.Error()) assert.Equal(t, 1, m1.applyCalled) assert.Equal(t, 1, m2.applyCalled) diff --git a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml b/bundle/tests/bundle/pipeline_glob_paths/databricks.yml deleted 
file mode 100644 index 2e69691c1..000000000 --- a/bundle/tests/bundle/pipeline_glob_paths/databricks.yml +++ /dev/null @@ -1,12 +0,0 @@ -bundle: - name: pipeline_glob_paths - -resources: - pipelines: - nyc_taxi_pipeline: - name: "nyc taxi loader" - libraries: - - notebook: - path: ./dlt/* - - notebook: - path: ./non-existent diff --git a/bundle/tests/bundle/pipeline_glob_paths_test.go b/bundle/tests/bundle/pipeline_glob_paths_test.go deleted file mode 100644 index 539ffc9d3..000000000 --- a/bundle/tests/bundle/pipeline_glob_paths_test.go +++ /dev/null @@ -1,40 +0,0 @@ -package bundle - -import ( - "context" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/phases" - "github.com/databricks/cli/cmd/root" - "github.com/databricks/databricks-sdk-go/service/iam" - "github.com/stretchr/testify/require" -) - -func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { - ctx := context.Background() - ctx = root.SetWorkspaceClient(ctx, nil) - - b, err := bundle.Load(ctx, "./pipeline_glob_paths") - require.NoError(t, err) - - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - require.NoError(t, err) - b.Config.Bundle.Target = "default" - - b.Config.Workspace.CurrentUser = &config.User{User: &iam.User{UserName: "user@domain.com"}} - b.WorkspaceClient() - - m := phases.Initialize() - err = bundle.Apply(ctx, b, m) - require.Error(t, err) - require.ErrorContains(t, err, "notebook ./non-existent not found") - - require.Equal( - t, - b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path, - "/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader", - ) -} diff --git a/bundle/tests/bundle/wheel_test.go b/bundle/tests/bundle/wheel_test.go deleted file mode 100644 index 57ecb54b9..000000000 --- a/bundle/tests/bundle/wheel_test.go +++ /dev/null @@ -1,88 +0,0 @@ -package bundle - -import ( - "context" - "path/filepath" - "testing" - - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/libraries" - "github.com/databricks/cli/bundle/phases" - "github.com/stretchr/testify/require" -) - -func TestBundlePythonWheelBuild(t *testing.T) { - ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel") - require.NoError(t, err) - - m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) - - matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") - require.NoError(t, err) - require.Equal(t, 1, len(matches)) - - match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.NoError(t, err) -} - -func TestBundlePythonWheelBuildAutoDetect(t *testing.T) { - ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_no_artifact") - require.NoError(t, err) - - m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) - - matches, err := filepath.Glob("python_wheel/my_test_code/dist/my_test_code-*.whl") - require.NoError(t, err) - require.Equal(t, 1, len(matches)) - - match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.NoError(t, err) -} - -func TestBundlePythonWheelWithDBFSLib(t *testing.T) { - ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_dbfs_lib") - require.NoError(t, err) - - m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) - - match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, 
b, match) - require.NoError(t, err) -} - -func TestBundlePythonWheelBuildNoBuildJustUpload(t *testing.T) { - ctx := context.Background() - b, err := bundle.Load(ctx, "./python_wheel_no_artifact_no_setup") - require.NoError(t, err) - - m := phases.Build() - err = bundle.Apply(ctx, b, m) - require.NoError(t, err) - - match := libraries.MatchWithArtifacts() - err = bundle.Apply(ctx, b, match) - require.ErrorContains(t, err, "./non-existing/*.whl") - - require.NotZero(t, len(b.Config.Artifacts)) - - artifact := b.Config.Artifacts["my_test_code-0.0.1-py3-none-any.whl"] - require.NotNil(t, artifact) - require.Empty(t, artifact.BuildCommand) - require.Contains(t, artifact.Files[0].Source, filepath.Join( - b.Config.Path, - "package", - "my_test_code-0.0.1-py3-none-any.whl", - )) - require.True(t, artifact.Files[0].NeedsUpload()) -} diff --git a/bundle/tests/bundle_permissions_test.go b/bundle/tests/bundle_permissions_test.go index 3ea9dc2e0..b55cbdd2b 100644 --- a/bundle/tests/bundle_permissions_test.go +++ b/bundle/tests/bundle_permissions_test.go @@ -18,8 +18,9 @@ func TestBundlePermissions(t *testing.T) { assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) assert.NotContains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) - err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, diags.Error()) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) assert.NotContains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) @@ -40,8 +41,9 @@ func TestBundlePermissionsDevTarget(t *testing.T) { assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_VIEW", ServicePrincipalName: "1234-abcd"}) assert.Contains(t, b.Config.Permissions, resources.Permission{Level: "CAN_RUN", UserName: "bot@company.com"}) - err := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, permissions.ApplyBundlePermissions()) + require.NoError(t, diags.Error()) + pipelinePermissions := b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Permissions assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_RUN", UserName: "test@company.com"}) assert.Contains(t, pipelinePermissions, resources.Permission{Level: "CAN_MANAGE", GroupName: "devs"}) diff --git a/bundle/tests/complex_variables_test.go b/bundle/tests/complex_variables_test.go new file mode 100644 index 000000000..1badea6df --- /dev/null +++ b/bundle/tests/complex_variables_test.go @@ -0,0 +1,70 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/stretchr/testify/require" +) + +func TestComplexVariables(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "default") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, 
diags.Error()) + + require.Equal(t, "13.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v2", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, "some-policy-id", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) + require.Equal(t, 2, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + require.Equal(t, "true", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) + + require.Equal(t, 3, len(b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries)) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Jar: "/path/to/jar", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Egg: "/path/to/egg", + }) + require.Contains(t, b.Config.Resources.Jobs["my_job"].Tasks[0].Libraries, compute.Library{ + Whl: "/path/to/whl", + }) + + require.Equal(t, "task with spark version 13.2.x-scala2.11 and jar /path/to/jar", b.Config.Resources.Jobs["my_job"].Tasks[0].TaskKey) +} + +func TestComplexVariablesOverride(t *testing.T) { + b, diags := loadTargetWithDiags("variables/complex", "dev") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferencesInComplexVariables(), + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) + + require.Equal(t, "14.2.x-scala2.11", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkVersion) + require.Equal(t, "Standard_DS3_v3", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NodeTypeId) + require.Equal(t, 4, b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.NumWorkers) + require.Equal(t, "false", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.speculation"]) + + // Making sure the variable is overriden and not merged / extended + // These properties are set in the default target but not set in override target + // So they should be empty + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.SparkConf["spark.random"]) + require.Equal(t, "", b.Config.Resources.Jobs["my_job"].JobClusters[0].NewCluster.PolicyId) +} diff --git a/bundle/tests/conflicting_resource_ids_test.go b/bundle/tests/conflicting_resource_ids_test.go index 704683ad5..e7f0aa28f 100644 --- a/bundle/tests/conflicting_resource_ids_test.go +++ b/bundle/tests/conflicting_resource_ids_test.go @@ -7,34 +7,36 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestConflictingResourceIdsNoSubconfig(t *testing.T) { ctx := context.Background() - _, err := bundle.Load(ctx, "./conflicting_resource_ids/no_subconfigurations") + b, err := bundle.Load(ctx, "./conflicting_resource_ids/no_subconfigurations") + require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.Load()) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/no_subconfigurations/databricks.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, bundleConfigPath)) + assert.ErrorContains(t, 
diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, bundleConfigPath)) } func TestConflictingResourceIdsOneSubconfig(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./conflicting_resource_ids/one_subconfiguration") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/databricks.yml") resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) + assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath)) } func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./conflicting_resource_ids/two_subconfigurations") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) + diags := bundle.Apply(ctx, b, phases.Load()) resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml") resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml") - assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) + assert.ErrorContains(t, diags.Error(), fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath)) } diff --git a/bundle/tests/enviroment_key_test.go b/bundle/tests/enviroment_key_test.go new file mode 100644 index 000000000..aed3964db --- /dev/null +++ b/bundle/tests/enviroment_key_test.go @@ -0,0 +1,23 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" + "github.com/stretchr/testify/require" +) + +func TestEnvironmentKeySupported(t *testing.T) { + _, diags := loadTargetWithDiags("./python_wheel/environment_key", "default") + require.Empty(t, diags) +} + +func TestEnvironmentKeyProvidedAndNoPanic(t *testing.T) { + b, diags := loadTargetWithDiags("./environment_key_only", "default") + require.Empty(t, diags) + + diags = bundle.Apply(context.Background(), b, libraries.ValidateLocalLibrariesExist()) + require.Empty(t, diags) +} diff --git a/bundle/tests/environment_git_test.go b/bundle/tests/environment_git_test.go index bb10825e4..ad4aec2e6 100644 --- a/bundle/tests/environment_git_test.go +++ b/bundle/tests/environment_git_test.go @@ -1,6 +1,8 @@ package config_tests import ( + "fmt" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -9,12 +11,14 @@ import ( func TestGitAutoLoadWithEnvironment(t *testing.T) { b := load(t, "./environments_autoload_git") assert.True(t, b.Config.Bundle.Git.Inferred) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitManuallySetBranchWithEnvironment(t *testing.T) { b := loadTarget(t, "./environments_autoload_git", "production") 
assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } diff --git a/bundle/tests/environment_key_only/databricks.yml b/bundle/tests/environment_key_only/databricks.yml new file mode 100644 index 000000000..caa34f8e3 --- /dev/null +++ b/bundle/tests/environment_key_only/databricks.yml @@ -0,0 +1,16 @@ +bundle: + name: environment_key_only + +resources: + jobs: + test_job: + name: "My Wheel Job" + tasks: + - task_key: TestTask + existing_cluster_id: "0717-132531-5opeqon1" + python_wheel_task: + package_name: "my_test_code" + entry_point: "run" + environment_key: "test_env" + environments: + - environment_key: "test_env" diff --git a/bundle/tests/environment_overrides/resources/databricks.yml b/bundle/tests/environment_overrides/resources/databricks.yml index df261ba03..137f8d9df 100644 --- a/bundle/tests/environment_overrides/resources/databricks.yml +++ b/bundle/tests/environment_overrides/resources/databricks.yml @@ -28,8 +28,6 @@ environments: pipelines: boolean1: - # Note: setting a property to a zero value (in Go) does not have effect. - # See the corresponding test for details. photon: false boolean2: diff --git a/bundle/tests/environment_overrides_test.go b/bundle/tests/environment_overrides_test.go index 91dc2c811..4a1115048 100644 --- a/bundle/tests/environment_overrides_test.go +++ b/bundle/tests/environment_overrides_test.go @@ -29,10 +29,7 @@ func TestEnvironmentOverridesResourcesStaging(t *testing.T) { b := loadTarget(t, "./environment_overrides/resources", "staging") assert.Equal(t, "staging job", b.Config.Resources.Jobs["job1"].Name) - // Overrides are only applied if they are not zero-valued. - // This means that in its current form, we cannot override a true value with a false value. - // Note: this is not desirable and will be addressed by representing our configuration - // in a different structure (e.g. with cty), instead of Go structs. - assert.Equal(t, true, b.Config.Resources.Pipelines["boolean1"].Photon) + // Override values are applied in the staging environment. 
+ assert.Equal(t, false, b.Config.Resources.Pipelines["boolean1"].Photon) assert.Equal(t, true, b.Config.Resources.Pipelines["boolean2"].Photon) } diff --git a/bundle/tests/git_test.go b/bundle/tests/git_test.go index c5ae83a20..21eaaedd2 100644 --- a/bundle/tests/git_test.go +++ b/bundle/tests/git_test.go @@ -2,6 +2,8 @@ package config_tests import ( "context" + "fmt" + "strings" "testing" "github.com/databricks/cli/bundle" @@ -13,14 +15,16 @@ import ( func TestGitAutoLoad(t *testing.T) { b := load(t, "./autoload_git") assert.True(t, b.Config.Bundle.Git.Inferred) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitManuallySetBranch(t *testing.T) { b := loadTarget(t, "./autoload_git", "production") assert.False(t, b.Config.Bundle.Git.Inferred) assert.Equal(t, "main", b.Config.Bundle.Git.Branch) - assert.Contains(t, b.Config.Bundle.Git.OriginURL, "/cli") + validUrl := strings.Contains(b.Config.Bundle.Git.OriginURL, "/cli") || strings.Contains(b.Config.Bundle.Git.OriginURL, "/bricks") + assert.True(t, validUrl, fmt.Sprintf("Expected URL to contain '/cli' or '/bricks', got %s", b.Config.Bundle.Git.OriginURL)) } func TestGitBundleBranchValidation(t *testing.T) { @@ -34,6 +38,6 @@ func TestGitBundleBranchValidation(t *testing.T) { assert.Equal(t, "feature-a", b.Config.Bundle.Git.Branch) assert.Equal(t, "feature-b", b.Config.Bundle.Git.ActualBranch) - err := bundle.Apply(context.Background(), b, mutator.ValidateGitDetails()) - assert.ErrorContains(t, err, "not on the right Git branch:") + diags := bundle.Apply(context.Background(), b, mutator.ValidateGitDetails()) + assert.ErrorContains(t, diags.Error(), "not on the right Git branch:") } diff --git a/bundle/tests/include_multiple/my_first_job/resource.yml b/bundle/tests/include_multiple/my_first_job/resource.yml index c2be5a160..4bd7c7164 100644 --- a/bundle/tests/include_multiple/my_first_job/resource.yml +++ b/bundle/tests/include_multiple/my_first_job/resource.yml @@ -2,3 +2,4 @@ resources: jobs: my_first_job: id: 1 + name: "My First Job" diff --git a/bundle/tests/include_multiple/my_second_job/resource.yml b/bundle/tests/include_multiple/my_second_job/resource.yml index 2c28c4622..3a1514055 100644 --- a/bundle/tests/include_multiple/my_second_job/resource.yml +++ b/bundle/tests/include_multiple/my_second_job/resource.yml @@ -2,3 +2,4 @@ resources: jobs: my_second_job: id: 2 + name: "My Second Job" diff --git a/bundle/tests/include_test.go b/bundle/tests/include_test.go index eb09d1aa0..5b0235f60 100644 --- a/bundle/tests/include_test.go +++ b/bundle/tests/include_test.go @@ -7,7 +7,7 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "golang.org/x/exp/maps" @@ -17,9 +17,9 @@ func TestIncludeInvalid(t *testing.T) { ctx := context.Background() b, err := bundle.Load(ctx, "./include_invalid") require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - require.Error(t, err) - assert.Contains(t, err.Error(), "notexists.yml defined in 'include' section does not match any files") + diags := bundle.Apply(ctx, b, phases.Load()) + require.Error(t, diags.Error()) + 
assert.ErrorContains(t, diags.Error(), "notexists.yml defined in 'include' section does not match any files") } func TestIncludeWithGlob(t *testing.T) { diff --git a/bundle/tests/include_with_glob/job.yml b/bundle/tests/include_with_glob/job.yml index 3d609c529..a98577818 100644 --- a/bundle/tests/include_with_glob/job.yml +++ b/bundle/tests/include_with_glob/job.yml @@ -2,3 +2,4 @@ resources: jobs: my_job: id: 1 + name: "My Job" diff --git a/bundle/tests/interpolation_test.go b/bundle/tests/interpolation_test.go index 837891a07..920b9000d 100644 --- a/bundle/tests/interpolation_test.go +++ b/bundle/tests/interpolation_test.go @@ -5,30 +5,29 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" + "github.com/databricks/cli/bundle/config/mutator" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestInterpolation(t *testing.T) { b := load(t, "./interpolation") - err := bundle.Apply(context.Background(), b, interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), + diags := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + "bundle", + "workspace", )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "foo bar", b.Config.Bundle.Name) assert.Equal(t, "foo bar | bar", b.Config.Resources.Jobs["my_job"].Name) } func TestInterpolationWithTarget(t *testing.T) { b := loadTarget(t, "./interpolation_target", "development") - err := bundle.Apply(context.Background(), b, interpolation.Interpolate( - interpolation.IncludeLookupsInPath("bundle"), - interpolation.IncludeLookupsInPath("workspace"), + diags := bundle.Apply(context.Background(), b, mutator.ResolveVariableReferences( + "bundle", + "workspace", )) - require.NoError(t, err) + require.NoError(t, diags.Error()) assert.Equal(t, "foo bar", b.Config.Bundle.Name) assert.Equal(t, "foo bar | bar | development | development", b.Config.Resources.Jobs["my_job"].Name) - } diff --git a/bundle/tests/job_cluster_key/databricks.yml b/bundle/tests/job_cluster_key/databricks.yml new file mode 100644 index 000000000..bd863db3e --- /dev/null +++ b/bundle/tests/job_cluster_key/databricks.yml @@ -0,0 +1,27 @@ +bundle: + name: job_cluster_key + +workspace: + host: https://acme.cloud.databricks.com/ + +targets: + default: + resources: + jobs: + foo: + name: job + tasks: + - task_key: test + job_cluster_key: key + development: + resources: + jobs: + foo: + job_clusters: + - job_cluster_key: key + new_cluster: + node_type_id: i3.xlarge + num_workers: 1 + tasks: + - task_key: test + job_cluster_key: key diff --git a/bundle/tests/job_cluster_key_test.go b/bundle/tests/job_cluster_key_test.go new file mode 100644 index 000000000..5a8b368e5 --- /dev/null +++ b/bundle/tests/job_cluster_key_test.go @@ -0,0 +1,28 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +func TestJobClusterKeyNotDefinedTest(t *testing.T) { + b := loadTarget(t, "./job_cluster_key", "default") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) + require.Len(t, diags, 1) + require.NoError(t, diags.Error()) + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "job_cluster_key key is not defined") +} + +func 
TestJobClusterKeyDefinedTest(t *testing.T) { + b := loadTarget(t, "./job_cluster_key", "development") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.JobClusterKeyDefined()) + require.Len(t, diags, 0) +} diff --git a/bundle/tests/job_with_spark_conf_test.go b/bundle/tests/job_with_spark_conf_test.go index a2c04c5ee..90bdc977d 100644 --- a/bundle/tests/job_with_spark_conf_test.go +++ b/bundle/tests/job_with_spark_conf_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestJobWithSparkConf(t *testing.T) { @@ -14,9 +15,17 @@ func TestJobWithSparkConf(t *testing.T) { assert.Len(t, job.JobClusters, 1) assert.Equal(t, "test_cluster", job.JobClusters[0].JobClusterKey) - // Existing behavior is such that including non-string values - // in the spark_conf map will cause the job to fail to load. - // This is expected to be solved once we switch to the custom YAML loader. - tasks := job.Tasks - assert.Len(t, tasks, 0, "see https://github.com/databricks/cli/issues/992") + // This test exists because of https://github.com/databricks/cli/issues/992. + // It is solved for bundles as of https://github.com/databricks/cli/pull/1098. + require.Len(t, job.JobClusters, 1) + cluster := job.JobClusters[0] + assert.Equal(t, "14.2.x-scala2.12", cluster.NewCluster.SparkVersion) + assert.Equal(t, "i3.xlarge", cluster.NewCluster.NodeTypeId) + assert.Equal(t, 2, cluster.NewCluster.NumWorkers) + assert.Equal(t, map[string]string{ + "spark.string": "string", + "spark.int": "1", + "spark.bool": "true", + "spark.float": "1.2", + }, cluster.NewCluster.SparkConf) } diff --git a/bundle/tests/loader.go b/bundle/tests/loader.go index f23b10764..8eddcf9a1 100644 --- a/bundle/tests/loader.go +++ b/bundle/tests/loader.go @@ -6,6 +6,8 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/libs/diag" "github.com/stretchr/testify/require" ) @@ -13,14 +15,30 @@ func load(t *testing.T, path string) *bundle.Bundle { ctx := context.Background() b, err := bundle.Load(ctx, path) require.NoError(t, err) - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.Load()) + require.NoError(t, diags.Error()) return b } func loadTarget(t *testing.T, path, env string) *bundle.Bundle { - b := load(t, path) - err := bundle.Apply(context.Background(), b, mutator.SelectTarget(env)) - require.NoError(t, err) + b, diags := loadTargetWithDiags(path, env) + require.NoError(t, diags.Error()) return b } + +func loadTargetWithDiags(path, env string) (*bundle.Bundle, diag.Diagnostics) { + ctx := context.Background() + b, err := bundle.Load(ctx, path) + if err != nil { + return nil, diag.FromErr(err) + } + + diags := bundle.Apply(ctx, b, bundle.Seq( + phases.LoadNamedTarget(env), + mutator.RewriteSyncPaths(), + mutator.MergeJobClusters(), + mutator.MergeJobTasks(), + mutator.MergePipelineClusters(), + )) + return b, diags +} diff --git a/bundle/tests/override_sync_test.go b/bundle/tests/override_sync_test.go deleted file mode 100644 index a2d3a05f5..000000000 --- a/bundle/tests/override_sync_test.go +++ /dev/null @@ -1,43 +0,0 @@ -package config_tests - -import ( - "testing" - - "github.com/stretchr/testify/assert" -) - -func TestOverrideSyncTarget(t *testing.T) { - b := load(t, "./override_sync") - assert.ElementsMatch(t, []string{"src/*"}, 
b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "development") - assert.ElementsMatch(t, []string{"src/*", "tests/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "staging") - assert.ElementsMatch(t, []string{"src/*", "fixtures/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync", "prod") - assert.ElementsMatch(t, []string{"src/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} - -func TestOverrideSyncTargetNoRootSync(t *testing.T) { - b := load(t, "./override_sync_no_root") - assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "development") - assert.ElementsMatch(t, []string{"tests/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"dist"}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "staging") - assert.ElementsMatch(t, []string{"fixtures/*"}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) - - b = loadTarget(t, "./override_sync_no_root", "prod") - assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) -} diff --git a/bundle/tests/path_translation/fallback/README.md b/bundle/tests/path_translation/fallback/README.md new file mode 100644 index 000000000..ec5f6c740 --- /dev/null +++ b/bundle/tests/path_translation/fallback/README.md @@ -0,0 +1,42 @@ +# Test path translation (with fallback to previous behavior) + +As of v0.214.0, all paths in a resource definition were resolved relative to the path +where that resource was first defined. If those paths were specified in the same file, +or in a different file in the same directory, this would be intuitive. + +If those paths were specified in a different file in a different directory, they would +still be resolved relative to the original file. + +For example, a job defined in `./resources/my_job.yml` with an override +in `./override.yml` would have to use paths relative to `./resources`. +This is counter-intuitive and error-prone, and we changed this behavior +in https://github.com/databricks/cli/pull/1273. + +## Appendix + +Q: Why did this behavior apply as of v0.214.0? + +A: With the introduction of dynamic configuration loading, we keep track + of the location (file, line, column) where a resource is defined. + This location information is used to perform path translation, but upon + introduction in v0.214.0, the code still used only a single path per resource. + Due to the semantics of merging two `dyn.Value` objects, the location + information of the first existing value is used for the merged value. + This meant that all paths for a resource were resolved relative to the + location where the resource was first defined. + +Q: What was the behavior before v0.214.0? + +A: Before we relied on dynamic configuration loading, all configuration was + maintained in a typed struct. The path for a resource was an unexported field on the + resource and was set right after loading the configuration file that contains it. + Target overrides contained the same path field, and applying a target override + would set the path for the resource to the path of the target override. 
+ This meant that all paths for a resource were resolved relative to the + location where the resource was last defined. + +Q: Why are we maintaining compatibility with the old behavior? + +A: We want to avoid breaking existing configurations that depend on this behavior. + Use of the old behavior should trigger warnings with a call to action to update. + We can include a deprecation timeline to remove the old behavior in the future. diff --git a/bundle/tests/path_translation/fallback/databricks.yml b/bundle/tests/path_translation/fallback/databricks.yml new file mode 100644 index 000000000..92be3f921 --- /dev/null +++ b/bundle/tests/path_translation/fallback/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: path_translation_fallback + +include: + - "resources/*.yml" + - "override_*.yml" + +targets: + development: + default: true + + error: + default: false diff --git a/bundle/tests/path_translation/fallback/override_job.yml b/bundle/tests/path_translation/fallback/override_job.yml new file mode 100644 index 000000000..c4354b14b --- /dev/null +++ b/bundle/tests/path_translation/fallback/override_job.yml @@ -0,0 +1,41 @@ +targets: + development: + resources: + jobs: + my_job: + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: ../src/notebook.py + + - task_key: spark_python_example + spark_python_task: + python_file: ../src/file.py + + - task_key: dbt_example + dbt_task: + project_directory: ../src/dbt_project + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: ../src/sql.sql + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + + # Append library; the path is resolved relative to the job's directory. + libraries: + - whl: ../dist/wheel2.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + + # Append library; the path is resolved relative to the job's directory. + libraries: + - jar: ../target/jar2.jar diff --git a/bundle/tests/path_translation/fallback/override_pipeline.yml b/bundle/tests/path_translation/fallback/override_pipeline.yml new file mode 100644 index 000000000..e5790256a --- /dev/null +++ b/bundle/tests/path_translation/fallback/override_pipeline.yml @@ -0,0 +1,13 @@ +targets: + development: + resources: + pipelines: + my_pipeline: + + # Append library; the path is resolved relative to the pipeline's directory. 
+ libraries: + - file: + path: ../src/file2.py + + - notebook: + path: ../src/notebook2.py diff --git a/bundle/tests/path_translation/fallback/resources/my_job.yml b/bundle/tests/path_translation/fallback/resources/my_job.yml new file mode 100644 index 000000000..4907df4f0 --- /dev/null +++ b/bundle/tests/path_translation/fallback/resources/my_job.yml @@ -0,0 +1,36 @@ +resources: + jobs: + my_job: + name: "placeholder" + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: "this value is overridden" + + - task_key: spark_python_example + spark_python_task: + python_file: "this value is overridden" + + - task_key: dbt_example + dbt_task: + project_directory: "this value is overridden" + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: "this value is overridden" + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + libraries: + - whl: ../dist/wheel1.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + libraries: + - jar: ../target/jar1.jar diff --git a/bundle/tests/path_translation/fallback/resources/my_pipeline.yml b/bundle/tests/path_translation/fallback/resources/my_pipeline.yml new file mode 100644 index 000000000..457856d1d --- /dev/null +++ b/bundle/tests/path_translation/fallback/resources/my_pipeline.yml @@ -0,0 +1,9 @@ +resources: + pipelines: + my_pipeline: + name: "placeholder" + libraries: + - file: + path: ../src/file1.py + - notebook: + path: ../src/notebook1.py diff --git a/bundle/tests/path_translation/fallback/src/dbt_project/.gitkeep b/bundle/tests/path_translation/fallback/src/dbt_project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/path_translation/fallback/src/file.py b/bundle/tests/path_translation/fallback/src/file.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/file1.py b/bundle/tests/path_translation/fallback/src/file1.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file1.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/file2.py b/bundle/tests/path_translation/fallback/src/file2.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/file2.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook.py b/bundle/tests/path_translation/fallback/src/notebook.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook1.py b/bundle/tests/path_translation/fallback/src/notebook1.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook1.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/fallback/src/notebook2.py b/bundle/tests/path_translation/fallback/src/notebook2.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/notebook2.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git 
a/bundle/tests/path_translation/fallback/src/sql.sql b/bundle/tests/path_translation/fallback/src/sql.sql new file mode 100644 index 000000000..24c55832d --- /dev/null +++ b/bundle/tests/path_translation/fallback/src/sql.sql @@ -0,0 +1 @@ +select "Hello, World!" diff --git a/bundle/tests/path_translation/nominal/README.md b/bundle/tests/path_translation/nominal/README.md new file mode 100644 index 000000000..aa7a52ab2 --- /dev/null +++ b/bundle/tests/path_translation/nominal/README.md @@ -0,0 +1,6 @@ +# Test path translation (nominal behavior) + +As of v0.216.0 (PR at https://github.com/databricks/cli/pull/1273), all paths in a resource +definition are resolved relative to the directory of the file where they are defined. + +This is more intuitive than the previous behavior (see `../fallback/README.md` for details). diff --git a/bundle/tests/path_translation/nominal/databricks.yml b/bundle/tests/path_translation/nominal/databricks.yml new file mode 100644 index 000000000..cd425920d --- /dev/null +++ b/bundle/tests/path_translation/nominal/databricks.yml @@ -0,0 +1,13 @@ +bundle: + name: path_translation_nominal + +include: + - "resources/*.yml" + - "override_*.yml" + +targets: + development: + default: true + + error: + default: false diff --git a/bundle/tests/path_translation/nominal/override_job.yml b/bundle/tests/path_translation/nominal/override_job.yml new file mode 100644 index 000000000..9ce90e63e --- /dev/null +++ b/bundle/tests/path_translation/nominal/override_job.yml @@ -0,0 +1,53 @@ +targets: + development: + resources: + jobs: + my_job: + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: ./src/notebook.py + + - task_key: spark_python_example + spark_python_task: + python_file: ./src/file.py + + - task_key: dbt_example + dbt_task: + project_directory: ./src/dbt_project + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: ./src/sql.sql + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + + # Append library; the path is resolved relative to this file's directory. + libraries: + - whl: ./dist/wheel2.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + + # Append library; the path is resolved relative to this file's directory. + libraries: + - jar: ./target/jar2.jar + + - task_key: for_each_notebook_example + for_each_task: + task: + notebook_task: + notebook_path: ./src/notebook.py + + - task_key: for_each_spark_python_example + for_each_task: + task: + spark_python_task: + python_file: ./src/file.py diff --git a/bundle/tests/path_translation/nominal/override_pipeline.yml b/bundle/tests/path_translation/nominal/override_pipeline.yml new file mode 100644 index 000000000..ac1fff410 --- /dev/null +++ b/bundle/tests/path_translation/nominal/override_pipeline.yml @@ -0,0 +1,13 @@ +targets: + development: + resources: + pipelines: + my_pipeline: + + # Append library; the path is resolved relative to this file's directory. 
+ libraries: + - file: + path: src/file2.py + + - notebook: + path: src/notebook2.py diff --git a/bundle/tests/path_translation/nominal/resources/my_job.yml b/bundle/tests/path_translation/nominal/resources/my_job.yml new file mode 100644 index 000000000..2020c9dc8 --- /dev/null +++ b/bundle/tests/path_translation/nominal/resources/my_job.yml @@ -0,0 +1,48 @@ +resources: + jobs: + my_job: + name: "placeholder" + tasks: + - task_key: notebook_example + notebook_task: + notebook_path: "this value is overridden" + + - task_key: spark_python_example + spark_python_task: + python_file: "this value is overridden" + + - task_key: dbt_example + dbt_task: + project_directory: "this value is overridden" + commands: + - "dbt run" + + - task_key: sql_example + sql_task: + file: + path: "this value is overridden" + warehouse_id: cafef00d + + - task_key: python_wheel_example + python_wheel_task: + package_name: my_package + libraries: + - whl: ../dist/wheel1.whl + + - task_key: spark_jar_example + spark_jar_task: + main_class_name: com.example.Main + libraries: + - jar: ../target/jar1.jar + + - task_key: for_each_notebook_example + for_each_task: + task: + notebook_task: + notebook_path: "this value is overridden" + + - task_key: for_each_spark_python_example + for_each_task: + task: + spark_python_task: + python_file: "this value is overridden" diff --git a/bundle/tests/path_translation/nominal/resources/my_pipeline.yml b/bundle/tests/path_translation/nominal/resources/my_pipeline.yml new file mode 100644 index 000000000..457856d1d --- /dev/null +++ b/bundle/tests/path_translation/nominal/resources/my_pipeline.yml @@ -0,0 +1,9 @@ +resources: + pipelines: + my_pipeline: + name: "placeholder" + libraries: + - file: + path: ../src/file1.py + - notebook: + path: ../src/notebook1.py diff --git a/bundle/tests/path_translation/nominal/src/dbt_project/.gitkeep b/bundle/tests/path_translation/nominal/src/dbt_project/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/path_translation/nominal/src/file.py b/bundle/tests/path_translation/nominal/src/file.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/file1.py b/bundle/tests/path_translation/nominal/src/file1.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file1.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/file2.py b/bundle/tests/path_translation/nominal/src/file2.py new file mode 100644 index 000000000..7df869a15 --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/file2.py @@ -0,0 +1 @@ +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook.py b/bundle/tests/path_translation/nominal/src/notebook.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook1.py b/bundle/tests/path_translation/nominal/src/notebook1.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook1.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/notebook2.py 
b/bundle/tests/path_translation/nominal/src/notebook2.py new file mode 100644 index 000000000..38d86b79c --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/notebook2.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("Hello, World!") diff --git a/bundle/tests/path_translation/nominal/src/sql.sql b/bundle/tests/path_translation/nominal/src/sql.sql new file mode 100644 index 000000000..24c55832d --- /dev/null +++ b/bundle/tests/path_translation/nominal/src/sql.sql @@ -0,0 +1 @@ +select "Hello, World!" diff --git a/bundle/tests/path_translation_test.go b/bundle/tests/path_translation_test.go new file mode 100644 index 000000000..05702d2a2 --- /dev/null +++ b/bundle/tests/path_translation_test.go @@ -0,0 +1,112 @@ +package config_tests + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/mutator" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestPathTranslationFallback(t *testing.T) { + b := loadTarget(t, "./path_translation/fallback", "development") + + m := mutator.TranslatePaths() + diags := bundle.Apply(context.Background(), b, m) + require.NoError(t, diags.Error()) + + j := b.Config.Resources.Jobs["my_job"] + assert.Len(t, j.Tasks, 6) + + assert.Equal(t, "notebook_example", filepath.ToSlash(j.Tasks[0].TaskKey)) + assert.Equal(t, "src/notebook", filepath.ToSlash(j.Tasks[0].NotebookTask.NotebookPath)) + + assert.Equal(t, "spark_python_example", filepath.ToSlash(j.Tasks[1].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[1].SparkPythonTask.PythonFile)) + + assert.Equal(t, "dbt_example", filepath.ToSlash(j.Tasks[2].TaskKey)) + assert.Equal(t, "src/dbt_project", filepath.ToSlash(j.Tasks[2].DbtTask.ProjectDirectory)) + + assert.Equal(t, "sql_example", filepath.ToSlash(j.Tasks[3].TaskKey)) + assert.Equal(t, "src/sql.sql", filepath.ToSlash(j.Tasks[3].SqlTask.File.Path)) + + assert.Equal(t, "python_wheel_example", filepath.ToSlash(j.Tasks[4].TaskKey)) + assert.Equal(t, "dist/wheel1.whl", filepath.ToSlash(j.Tasks[4].Libraries[0].Whl)) + assert.Equal(t, "dist/wheel2.whl", filepath.ToSlash(j.Tasks[4].Libraries[1].Whl)) + + assert.Equal(t, "spark_jar_example", filepath.ToSlash(j.Tasks[5].TaskKey)) + assert.Equal(t, "target/jar1.jar", filepath.ToSlash(j.Tasks[5].Libraries[0].Jar)) + assert.Equal(t, "target/jar2.jar", filepath.ToSlash(j.Tasks[5].Libraries[1].Jar)) + + p := b.Config.Resources.Pipelines["my_pipeline"] + assert.Len(t, p.Libraries, 4) + + assert.Equal(t, "src/file1.py", filepath.ToSlash(p.Libraries[0].File.Path)) + assert.Equal(t, "src/notebook1", filepath.ToSlash(p.Libraries[1].Notebook.Path)) + assert.Equal(t, "src/file2.py", filepath.ToSlash(p.Libraries[2].File.Path)) + assert.Equal(t, "src/notebook2", filepath.ToSlash(p.Libraries[3].Notebook.Path)) +} + +func TestPathTranslationFallbackError(t *testing.T) { + b := loadTarget(t, "./path_translation/fallback", "error") + + m := mutator.TranslatePaths() + diags := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, diags.Error(), `notebook this value is overridden not found`) +} + +func TestPathTranslationNominal(t *testing.T) { + b := loadTarget(t, "./path_translation/nominal", "development") + + m := mutator.TranslatePaths() + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) + + j := b.Config.Resources.Jobs["my_job"] + assert.Len(t, j.Tasks, 8) + + assert.Equal(t, "notebook_example", filepath.ToSlash(j.Tasks[0].TaskKey)) + 
assert.Equal(t, "src/notebook", filepath.ToSlash(j.Tasks[0].NotebookTask.NotebookPath)) + + assert.Equal(t, "spark_python_example", filepath.ToSlash(j.Tasks[1].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[1].SparkPythonTask.PythonFile)) + + assert.Equal(t, "dbt_example", filepath.ToSlash(j.Tasks[2].TaskKey)) + assert.Equal(t, "src/dbt_project", filepath.ToSlash(j.Tasks[2].DbtTask.ProjectDirectory)) + + assert.Equal(t, "sql_example", filepath.ToSlash(j.Tasks[3].TaskKey)) + assert.Equal(t, "src/sql.sql", filepath.ToSlash(j.Tasks[3].SqlTask.File.Path)) + + assert.Equal(t, "python_wheel_example", filepath.ToSlash(j.Tasks[4].TaskKey)) + assert.Equal(t, "dist/wheel1.whl", filepath.ToSlash(j.Tasks[4].Libraries[0].Whl)) + assert.Equal(t, "dist/wheel2.whl", filepath.ToSlash(j.Tasks[4].Libraries[1].Whl)) + + assert.Equal(t, "spark_jar_example", filepath.ToSlash(j.Tasks[5].TaskKey)) + assert.Equal(t, "target/jar1.jar", filepath.ToSlash(j.Tasks[5].Libraries[0].Jar)) + assert.Equal(t, "target/jar2.jar", filepath.ToSlash(j.Tasks[5].Libraries[1].Jar)) + + assert.Equal(t, "for_each_notebook_example", filepath.ToSlash(j.Tasks[6].TaskKey)) + assert.Equal(t, "src/notebook", filepath.ToSlash(j.Tasks[6].ForEachTask.Task.NotebookTask.NotebookPath)) + + assert.Equal(t, "for_each_spark_python_example", filepath.ToSlash(j.Tasks[7].TaskKey)) + assert.Equal(t, "src/file.py", filepath.ToSlash(j.Tasks[7].ForEachTask.Task.SparkPythonTask.PythonFile)) + + p := b.Config.Resources.Pipelines["my_pipeline"] + assert.Len(t, p.Libraries, 4) + + assert.Equal(t, "src/file1.py", filepath.ToSlash(p.Libraries[0].File.Path)) + assert.Equal(t, "src/notebook1", filepath.ToSlash(p.Libraries[1].Notebook.Path)) + assert.Equal(t, "src/file2.py", filepath.ToSlash(p.Libraries[2].File.Path)) + assert.Equal(t, "src/notebook2", filepath.ToSlash(p.Libraries[3].Notebook.Path)) +} + +func TestPathTranslationNominalError(t *testing.T) { + b := loadTarget(t, "./path_translation/nominal", "error") + + m := mutator.TranslatePaths() + diags := bundle.Apply(context.Background(), b, m) + assert.ErrorContains(t, diags.Error(), `notebook this value is overridden not found`) +} diff --git a/bundle/tests/pipeline_glob_paths/databricks.yml b/bundle/tests/pipeline_glob_paths/databricks.yml new file mode 100644 index 000000000..d25b977ba --- /dev/null +++ b/bundle/tests/pipeline_glob_paths/databricks.yml @@ -0,0 +1,24 @@ +bundle: + name: pipeline_glob_paths + +resources: + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + libraries: + - notebook: + path: ./dlt/* + +targets: + default: + default: true + + error: + default: false + + resources: + pipelines: + nyc_taxi_pipeline: + libraries: + - notebook: + path: ./non-existent diff --git a/bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py b/bundle/tests/pipeline_glob_paths/dlt/nyc_taxi_loader.py similarity index 100% rename from bundle/tests/bundle/pipeline_glob_paths/dlt/nyc_taxi_loader.py rename to bundle/tests/pipeline_glob_paths/dlt/nyc_taxi_loader.py diff --git a/bundle/tests/pipeline_glob_paths_test.go b/bundle/tests/pipeline_glob_paths_test.go new file mode 100644 index 000000000..bf5039b5f --- /dev/null +++ b/bundle/tests/pipeline_glob_paths_test.go @@ -0,0 +1,55 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + 
"github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestExpandPipelineGlobPaths(t *testing.T) { + b := loadTarget(t, "./pipeline_glob_paths", "default") + + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + diags := bundle.Apply(ctx, b, phases.Initialize()) + require.NoError(t, diags.Error()) + require.Equal( + t, + "/Users/user@domain.com/.bundle/pipeline_glob_paths/default/files/dlt/nyc_taxi_loader", + b.Config.Resources.Pipelines["nyc_taxi_pipeline"].Libraries[0].Notebook.Path, + ) +} + +func TestExpandPipelineGlobPathsWithNonExistent(t *testing.T) { + b := loadTarget(t, "./pipeline_glob_paths", "error") + + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) + + ctx := context.Background() + diags := bundle.Apply(ctx, b, phases.Initialize()) + require.ErrorContains(t, diags.Error(), "notebook ./non-existent not found") +} diff --git a/bundle/tests/bundle/python_wheel/.gitignore b/bundle/tests/python_wheel/environment_key/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel/.gitignore rename to bundle/tests/python_wheel/environment_key/.gitignore diff --git a/bundle/tests/python_wheel/environment_key/databricks.yml b/bundle/tests/python_wheel/environment_key/databricks.yml new file mode 100644 index 000000000..198f8c0d2 --- /dev/null +++ b/bundle/tests/python_wheel/environment_key/databricks.yml @@ -0,0 +1,26 @@ +bundle: + name: environment_key + +artifacts: + my_test_code: + type: whl + path: "./my_test_code" + build: "python3 setup.py bdist_wheel" + +resources: + jobs: + test_job: + name: "My Wheel Job" + tasks: + - task_key: TestTask + existing_cluster_id: "0717-132531-5opeqon1" + python_wheel_task: + package_name: "my_test_code" + entry_point: "run" + environment_key: "test_env" + environments: + - environment_key: "test_env" + spec: + client: "1" + dependencies: + - ./my_test_code/dist/*.whl diff --git a/bundle/tests/bundle/python_wheel/my_test_code/setup.py b/bundle/tests/python_wheel/environment_key/my_test_code/setup.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/setup.py rename to bundle/tests/python_wheel/environment_key/my_test_code/setup.py diff --git a/bundle/tests/bundle/python_wheel/my_test_code/src/__init__.py b/bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/src/__init__.py rename to bundle/tests/python_wheel/environment_key/my_test_code/src/__init__.py diff --git a/bundle/tests/bundle/python_wheel/my_test_code/src/__main__.py b/bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py similarity index 100% rename from bundle/tests/bundle/python_wheel/my_test_code/src/__main__.py rename to bundle/tests/python_wheel/environment_key/my_test_code/src/__main__.py diff --git a/bundle/tests/bundle/python_wheel_no_artifact/.gitignore 
b/bundle/tests/python_wheel/python_wheel/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/.gitignore rename to bundle/tests/python_wheel/python_wheel/.gitignore diff --git a/bundle/tests/bundle/python_wheel/bundle.yml b/bundle/tests/python_wheel/python_wheel/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel/bundle.yml rename to bundle/tests/python_wheel/python_wheel/bundle.yml diff --git a/bundle/tests/python_wheel/python_wheel/my_test_code/setup.py b/bundle/tests/python_wheel/python_wheel/my_test_code/setup.py new file mode 100644 index 000000000..0bd871dd3 --- /dev/null +++ b/bundle/tests/python_wheel/python_wheel/my_test_code/setup.py @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import src + +setup( + name="my_test_code", + version=src.__version__, + author=src.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my test wheel", + packages=find_packages(include=["src"]), + entry_points={"group_1": "run=src.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__init__.py b/bundle/tests/python_wheel/python_wheel/my_test_code/src/__init__.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__init__.py rename to bundle/tests/python_wheel/python_wheel/my_test_code/src/__init__.py diff --git a/bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__main__.py b/bundle/tests/python_wheel/python_wheel/my_test_code/src/__main__.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/my_test_code/__main__.py rename to bundle/tests/python_wheel/python_wheel/my_test_code/src/__main__.py diff --git a/bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml b/bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_dbfs_lib/bundle.yml rename to bundle/tests/python_wheel/python_wheel_dbfs_lib/bundle.yml diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore b/bundle/tests/python_wheel/python_wheel_no_artifact/.gitignore similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/.gitignore rename to bundle/tests/python_wheel/python_wheel_no_artifact/.gitignore diff --git a/bundle/tests/bundle/python_wheel_no_artifact/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/bundle.yml rename to bundle/tests/python_wheel/python_wheel_no_artifact/bundle.yml diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__init__.py b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__init__.py new file mode 100644 index 000000000..909f1f322 --- /dev/null +++ b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__main__.py b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__main__.py new file mode 100644 index 000000000..73d045afb --- /dev/null +++ b/bundle/tests/python_wheel/python_wheel_no_artifact/my_test_code/__main__.py @@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print('Hello from my func') + print('Got 
arguments:') + print(sys.argv) + + +if __name__ == '__main__': + main() diff --git a/bundle/tests/bundle/python_wheel_no_artifact/setup.py b/bundle/tests/python_wheel/python_wheel_no_artifact/setup.py similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact/setup.py rename to bundle/tests/python_wheel/python_wheel_no_artifact/setup.py diff --git a/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/.gitignore b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/.gitignore new file mode 100644 index 000000000..f03e23bc2 --- /dev/null +++ b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/.gitignore @@ -0,0 +1,3 @@ +build/ +*.egg-info +.databricks diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/bundle.yml rename to bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/bundle.yml diff --git a/bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl b/bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl similarity index 100% rename from bundle/tests/bundle/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl rename to bundle/tests/python_wheel/python_wheel_no_artifact_no_setup/package/my_test_code-0.0.1-py3-none-any.whl diff --git a/bundle/tests/python_wheel_test.go b/bundle/tests/python_wheel_test.go new file mode 100644 index 000000000..8d0036a7b --- /dev/null +++ b/bundle/tests/python_wheel_test.go @@ -0,0 +1,98 @@ +package config_tests + +import ( + "context" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/libraries" + "github.com/databricks/cli/bundle/phases" + "github.com/stretchr/testify/require" +) + +func TestPythonWheelBuild(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/python_wheel") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + matches, err := filepath.Glob("./python_wheel/python_wheel/my_test_code/dist/my_test_code-*.whl") + require.NoError(t, err) + require.Equal(t, 1, len(matches)) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) +} + +func TestPythonWheelBuildAutoDetect(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + matches, err := filepath.Glob("./python_wheel/python_wheel_no_artifact/dist/my_test_code-*.whl") + require.NoError(t, err) + require.Equal(t, 1, len(matches)) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) +} + +func TestPythonWheelWithDBFSLib(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_dbfs_lib") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) +} + +func 
TestPythonWheelBuildNoBuildJustUpload(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/python_wheel_no_artifact_no_setup") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.ErrorContains(t, diags.Error(), "./non-existing/*.whl") + + require.NotZero(t, len(b.Config.Artifacts)) + + artifact := b.Config.Artifacts["my_test_code-0.0.1-py3-none-any.whl"] + require.NotNil(t, artifact) + require.Empty(t, artifact.BuildCommand) + require.Contains(t, artifact.Files[0].Source, filepath.Join(b.RootPath, "package", + "my_test_code-0.0.1-py3-none-any.whl", + )) +} + +func TestPythonWheelBuildWithEnvironmentKey(t *testing.T) { + ctx := context.Background() + b, err := bundle.Load(ctx, "./python_wheel/environment_key") + require.NoError(t, err) + + diags := bundle.Apply(ctx, b, bundle.Seq(phases.Load(), phases.Build())) + require.NoError(t, diags.Error()) + + matches, err := filepath.Glob("./python_wheel/environment_key/my_test_code/dist/my_test_code-*.whl") + require.NoError(t, err) + require.Equal(t, 1, len(matches)) + + match := libraries.ValidateLocalLibrariesExist() + diags = bundle.Apply(ctx, b, match) + require.NoError(t, diags.Error()) +} diff --git a/bundle/tests/quality_monitor/databricks.yml b/bundle/tests/quality_monitor/databricks.yml new file mode 100644 index 000000000..6138b9357 --- /dev/null +++ b/bundle/tests/quality_monitor/databricks.yml @@ -0,0 +1,47 @@ +bundle: + name: quality_monitors + +resources: + quality_monitors: + my_monitor: + table_name: "main.test.dev" + assets_dir: "/Shared/provider-test/databricks_monitoring/main.test.thing1" + output_schema_name: "main.dev" + inference_log: + granularities: ["1 day"] + timestamp_col: "timestamp" + prediction_col: "prediction" + model_id_col: "model_id" + problem_type: "PROBLEM_TYPE_REGRESSION" + schedule: + quartz_cron_expression: "0 0 12 * * ?" 
# every day at noon + timezone_id: UTC + +targets: + development: + mode: development + default: true + resources: + quality_monitors: + my_monitor: + table_name: "main.test.dev" + + staging: + resources: + quality_monitors: + my_monitor: + table_name: "main.test.staging" + output_schema_name: "main.staging" + + production: + resources: + quality_monitors: + my_monitor: + table_name: "main.test.prod" + output_schema_name: "main.prod" + inference_log: + granularities: ["1 hour"] + timestamp_col: "timestamp_prod" + prediction_col: "prediction_prod" + model_id_col: "model_id_prod" + problem_type: "PROBLEM_TYPE_REGRESSION" diff --git a/bundle/tests/quality_monitor_test.go b/bundle/tests/quality_monitor_test.go new file mode 100644 index 000000000..9b91052f5 --- /dev/null +++ b/bundle/tests/quality_monitor_test.go @@ -0,0 +1,59 @@ +package config_tests + +import ( + "testing" + + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/stretchr/testify/assert" +) + +func assertExpectedMonitor(t *testing.T, p *resources.QualityMonitor) { + assert.Equal(t, "timestamp", p.InferenceLog.TimestampCol) + assert.Equal(t, "prediction", p.InferenceLog.PredictionCol) + assert.Equal(t, "model_id", p.InferenceLog.ModelIdCol) + assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType) +} + +func TestMonitorTableNames(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "development") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + assert.Equal(t, b.Config.Bundle.Mode, config.Development) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.dev", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "main.dev", p.OutputSchemaName) + + assertExpectedMonitor(t, p) +} + +func TestMonitorStaging(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "staging") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.staging", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "main.staging", p.OutputSchemaName) + + assertExpectedMonitor(t, p) +} + +func TestMonitorProduction(t *testing.T) { + b := loadTarget(t, "./quality_monitor", "production") + assert.Len(t, b.Config.Resources.QualityMonitors, 1) + + p := b.Config.Resources.QualityMonitors["my_monitor"] + assert.Equal(t, "main.test.prod", p.TableName) + assert.Equal(t, "/Shared/provider-test/databricks_monitoring/main.test.thing1", p.AssetsDir) + assert.Equal(t, "main.prod", p.OutputSchemaName) + + inferenceLog := p.InferenceLog + assert.Equal(t, []string{"1 day", "1 hour"}, inferenceLog.Granularities) + assert.Equal(t, "timestamp_prod", p.InferenceLog.TimestampCol) + assert.Equal(t, "prediction_prod", p.InferenceLog.PredictionCol) + assert.Equal(t, "model_id_prod", p.InferenceLog.ModelIdCol) + assert.Equal(t, catalog.MonitorInferenceLogProblemType("PROBLEM_TYPE_REGRESSION"), p.InferenceLog.ProblemType) +} diff --git a/bundle/tests/relative_path_translation/databricks.yml b/bundle/tests/relative_path_translation/databricks.yml new file mode 100644 index 000000000..651ff267c --- /dev/null +++ b/bundle/tests/relative_path_translation/databricks.yml @@ -0,0 +1,33 @@ +bundle: + name: relative_path_translation + +include: + 
- resources/*.yml + +variables: + file_path: + # This path is expected to be resolved relative to where it is used. + default: ../src/file1.py + +workspace: + file_path: /remote + +targets: + default: + default: true + + override: + variables: + file_path: ./src/file2.py + + resources: + jobs: + job: + tasks: + - task_key: local + spark_python_task: + python_file: ./src/file2.py + + - task_key: variable_reference + spark_python_task: + python_file: ${var.file_path} diff --git a/bundle/tests/relative_path_translation/resources/job.yml b/bundle/tests/relative_path_translation/resources/job.yml new file mode 100644 index 000000000..93f121f25 --- /dev/null +++ b/bundle/tests/relative_path_translation/resources/job.yml @@ -0,0 +1,14 @@ +resources: + jobs: + job: + tasks: + - task_key: local + spark_python_task: + python_file: ../src/file1.py + + - task_key: variable_reference + spark_python_task: + # Note: this is a pure variable reference yet needs to persist the location + # of the reference, not the location of the variable value. + # Also see https://github.com/databricks/cli/issues/1330. + python_file: ${var.file_path} diff --git a/bundle/tests/relative_path_translation/src/file1.py b/bundle/tests/relative_path_translation/src/file1.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/relative_path_translation/src/file2.py b/bundle/tests/relative_path_translation/src/file2.py new file mode 100644 index 000000000..e69de29bb diff --git a/bundle/tests/relative_path_translation_test.go b/bundle/tests/relative_path_translation_test.go new file mode 100644 index 000000000..d5b80bea5 --- /dev/null +++ b/bundle/tests/relative_path_translation_test.go @@ -0,0 +1,53 @@ +package config_tests + +import ( + "context" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func configureMock(t *testing.T, b *bundle.Bundle) { + // Configure mock workspace client + m := mocks.NewMockWorkspaceClient(t) + m.WorkspaceClient.Config = &config.Config{ + Host: "https://mock.databricks.workspace.com", + } + m.GetMockCurrentUserAPI().EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "user@domain.com", + }, nil) + b.SetWorkpaceClient(m.WorkspaceClient) +} + +func TestRelativePathTranslationDefault(t *testing.T) { + b := loadTarget(t, "./relative_path_translation", "default") + configureMock(t, b) + + diags := bundle.Apply(context.Background(), b, phases.Initialize()) + require.NoError(t, diags.Error()) + + t0 := b.Config.Resources.Jobs["job"].Tasks[0] + assert.Equal(t, "/remote/src/file1.py", t0.SparkPythonTask.PythonFile) + t1 := b.Config.Resources.Jobs["job"].Tasks[1] + assert.Equal(t, "/remote/src/file1.py", t1.SparkPythonTask.PythonFile) +} + +func TestRelativePathTranslationOverride(t *testing.T) { + b := loadTarget(t, "./relative_path_translation", "override") + configureMock(t, b) + + diags := bundle.Apply(context.Background(), b, phases.Initialize()) + require.NoError(t, diags.Error()) + + t0 := b.Config.Resources.Jobs["job"].Tasks[0] + assert.Equal(t, "/remote/src/file2.py", t0.SparkPythonTask.PythonFile) + t1 := b.Config.Resources.Jobs["job"].Tasks[1] + assert.Equal(t, "/remote/src/file2.py", t1.SparkPythonTask.PythonFile) +} diff --git 
a/bundle/tests/relative_path_with_includes_test.go b/bundle/tests/relative_path_with_includes_test.go index 92249c412..6e13628be 100644 --- a/bundle/tests/relative_path_with_includes_test.go +++ b/bundle/tests/relative_path_with_includes_test.go @@ -11,17 +11,31 @@ import ( ) func TestRelativePathsWithIncludes(t *testing.T) { - b := load(t, "./relative_path_with_includes") + b := loadTarget(t, "./relative_path_with_includes", "default") m := mutator.TranslatePaths() - err := bundle.Apply(context.Background(), b, m) - assert.NoError(t, err) + diags := bundle.Apply(context.Background(), b, m) + assert.NoError(t, diags.Error()) assert.Equal(t, "artifact_a", b.Config.Artifacts["test_a"].Path) assert.Equal(t, filepath.Join("subfolder", "artifact_b"), b.Config.Artifacts["test_b"].Path) - assert.ElementsMatch(t, []string{"./folder_a/*.*", filepath.Join("subfolder", "folder_c", "*.*")}, b.Config.Sync.Include) - assert.ElementsMatch(t, []string{"./folder_b/*.*", filepath.Join("subfolder", "folder_d", "*.*")}, b.Config.Sync.Exclude) + assert.ElementsMatch( + t, + []string{ + filepath.Join("folder_a", "*.*"), + filepath.Join("subfolder", "folder_c", "*.*"), + }, + b.Config.Sync.Include, + ) + assert.ElementsMatch( + t, + []string{ + filepath.Join("folder_b", "*.*"), + filepath.Join("subfolder", "folder_d", "*.*"), + }, + b.Config.Sync.Exclude, + ) assert.Equal(t, filepath.Join("dist", "job_a.whl"), b.Config.Resources.Jobs["job_a"].Tasks[0].Libraries[0].Whl) assert.Equal(t, filepath.Join("subfolder", "dist", "job_b.whl"), b.Config.Resources.Jobs["job_b"].Tasks[0].Libraries[0].Whl) diff --git a/bundle/tests/run_as/allowed/databricks.yml b/bundle/tests/run_as/allowed/databricks.yml new file mode 100644 index 000000000..6cb9cd5a4 --- /dev/null +++ b/bundle/tests/run_as/allowed/databricks.yml @@ -0,0 +1,52 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + mode: development + run_as: + user_name: "my_user_name" + +resources: + jobs: + job_one: + name: Job One + + tasks: + - task_key: "task_one" + notebook_task: + notebook_path: "./test.py" + + job_two: + name: Job Two + + tasks: + - task_key: "task_two" + notebook_task: + notebook_path: "./test.py" + + job_three: + name: Job Three + + run_as: + service_principal_name: "my_service_principal_for_job" + + tasks: + - task_key: "task_three" + notebook_task: + notebook_path: "./test.py" + + models: + model_one: + name: "skynet" + + registered_models: + model_two: + name: "skynet (in UC)" + + experiments: + experiment_one: + name: "experiment_one" diff --git a/bundle/tests/run_as/databricks.yml b/bundle/tests/run_as/legacy/databricks.yml similarity index 61% rename from bundle/tests/run_as/databricks.yml rename to bundle/tests/run_as/legacy/databricks.yml index 18ea55736..e47224dbb 100644 --- a/bundle/tests/run_as/databricks.yml +++ b/bundle/tests/run_as/legacy/databricks.yml @@ -4,39 +4,65 @@ bundle: run_as: service_principal_name: "my_service_principal" -targets: - development: - mode: development - run_as: - user_name: "my_user_name" +experimental: + use_legacy_run_as: true resources: + jobs: + job_one: + name: Job One + + tasks: + - task_key: "task_one" + notebook_task: + notebook_path: "./test.py" + + job_two: + name: Job Two + + tasks: + - task_key: "task_two" + notebook_task: + notebook_path: "./test.py" + + job_three: + name: Job Three + + run_as: + service_principal_name: "my_service_principal_for_job" + + tasks: + - task_key: "task_three" + notebook_task: + notebook_path: "./test.py" + 
pipelines: nyc_taxi_pipeline: + name: "nyc taxi loader" + permissions: - level: CAN_VIEW service_principal_name: my_service_principal - level: CAN_VIEW user_name: my_user_name - name: "nyc taxi loader" + libraries: - notebook: path: ./dlt/nyc_taxi_loader - jobs: - job_one: - name: Job One - tasks: - - task: - notebook_path: "./test.py" - job_two: - name: Job Two - tasks: - - task: - notebook_path: "./test.py" - job_three: - name: Job Three - run_as: - service_principal_name: "my_service_principal_for_job" - tasks: - - task: - notebook_path: "./test.py" + + + models: + model_one: + name: "skynet" + + registered_models: + model_two: + name: "skynet (in UC)" + + experiments: + experiment_one: + name: "experiment_one" + + model_serving_endpoints: + model_serving_one: + name: "skynet" diff --git a/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml b/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml new file mode 100644 index 000000000..dfab50e94 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/both_sp_and_user/databricks.yml @@ -0,0 +1,17 @@ +bundle: + name: "run_as" + +# This is not allowed because both service_principal_name and user_name are set +run_as: + service_principal_name: "my_service_principal" + user_name: "my_user_name" + +resources: + jobs: + job_one: + name: Job One + + tasks: + - task_key: "task_one" + notebook_task: + notebook_path: "./test.py" diff --git a/bundle/tests/run_as/not_allowed/model_serving/databricks.yml b/bundle/tests/run_as/not_allowed/model_serving/databricks.yml new file mode 100644 index 000000000..cdd7e0913 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/model_serving/databricks.yml @@ -0,0 +1,15 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + run_as: + user_name: "my_user_name" + +resources: + model_serving_endpoints: + foo: + name: "skynet" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml new file mode 100644 index 000000000..a328fbd8c --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml @@ -0,0 +1,4 @@ +bundle: + name: "abc" + +run_as: diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml new file mode 100644 index 000000000..be18f60e8 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml new file mode 100644 index 000000000..33c48cb58 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml @@ -0,0 +1,5 @@ +bundle: + name: "abc" + +run_as: + user_name: "" diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml new file mode 100644 index 000000000..4b59dc918 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml @@ -0,0 +1,6 @@ +bundle: + name: "abc" + +run_as: + service_principal_name: "" + user_name: "" diff --git 
a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml new file mode 100644 index 000000000..f7c1d728d --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/databricks.yml @@ -0,0 +1,8 @@ +bundle: + name: "abc" + +run_as: + user_name: "my_user_name" + +include: + - ./override.yml diff --git a/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml new file mode 100644 index 000000000..d093e4c95 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/neither_sp_nor_user/override/override.yml @@ -0,0 +1,4 @@ +targets: + development: + default: true + run_as: diff --git a/bundle/tests/run_as/not_allowed/pipelines/databricks.yml b/bundle/tests/run_as/not_allowed/pipelines/databricks.yml new file mode 100644 index 000000000..d59c34ab6 --- /dev/null +++ b/bundle/tests/run_as/not_allowed/pipelines/databricks.yml @@ -0,0 +1,25 @@ +bundle: + name: "run_as" + +run_as: + service_principal_name: "my_service_principal" + +targets: + development: + run_as: + user_name: "my_user_name" + +resources: + pipelines: + nyc_taxi_pipeline: + name: "nyc taxi loader" + + permissions: + - level: CAN_VIEW + service_principal_name: my_service_principal + - level: CAN_VIEW + user_name: my_user_name + + libraries: + - notebook: + path: ./dlt/nyc_taxi_loader diff --git a/bundle/tests/run_as_test.go b/bundle/tests/run_as_test.go index 44c068165..6c07cc537 100644 --- a/bundle/tests/run_as_test.go +++ b/bundle/tests/run_as_test.go @@ -2,29 +2,41 @@ package config_tests import ( "context" + "fmt" + "path/filepath" "testing" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/config/mutator" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/ml" + "github.com/databricks/databricks-sdk-go/service/serving" "github.com/stretchr/testify/assert" ) -func TestRunAsDefault(t *testing.T) { - b := load(t, "./run_as") - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "jane@doe.com", - }, - } +func TestRunAsForAllowed(t *testing.T) { + b := load(t, "./run_as/allowed") + ctx := context.Background() - err := bundle.Apply(ctx, b, mutator.SetRunAs()) - assert.NoError(t, err) + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs + // job_one and job_two should have the same run_as identity as the bundle. assert.NotNil(t, jobs["job_one"].RunAs) assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_one"].RunAs.UserName) @@ -33,33 +45,38 @@ func TestRunAsDefault(t *testing.T) { assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_two"].RunAs.UserName) + // job_three should retain the job level run_as identity. 
assert.NotNil(t, jobs["job_three"].RunAs) assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_three"].RunAs.UserName) - pipelines := b.Config.Resources.Pipelines - assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].UserName, "my_user_name") - - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName, "my_service_principal") + // Assert other resources are not affected. + assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) } -func TestRunAsDevelopment(t *testing.T) { - b := loadTarget(t, "./run_as", "development") - b.Config.Workspace.CurrentUser = &config.User{ - User: &iam.User{ - UserName: "jane@doe.com", - }, - } +func TestRunAsForAllowedWithTargetOverride(t *testing.T) { + b := loadTarget(t, "./run_as/allowed", "development") + ctx := context.Background() - err := bundle.Apply(ctx, b, mutator.SetRunAs()) - assert.NoError(t, err) + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) assert.Len(t, b.Config.Resources.Jobs, 3) jobs := b.Config.Resources.Jobs + // job_one and job_two should have the same run_as identity as the bundle's + // development target. assert.NotNil(t, jobs["job_one"].RunAs) assert.Equal(t, "", jobs["job_one"].RunAs.ServicePrincipalName) assert.Equal(t, "my_user_name", jobs["job_one"].RunAs.UserName) @@ -68,15 +85,228 @@ func TestRunAsDevelopment(t *testing.T) { assert.Equal(t, "", jobs["job_two"].RunAs.ServicePrincipalName) assert.Equal(t, "my_user_name", jobs["job_two"].RunAs.UserName) + // job_three should retain the job level run_as identity. assert.NotNil(t, jobs["job_three"].RunAs) assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) assert.Equal(t, "", jobs["job_three"].RunAs.UserName) + // Assert other resources are not affected. 
+ assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) + +} + +func TestRunAsErrorForPipelines(t *testing.T) { + b := load(t, "./run_as/not_allowed/pipelines") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/pipelines/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("pipelines are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) +} + +func TestRunAsNoErrorForPipelines(t *testing.T) { + b := load(t, "./run_as/not_allowed/pipelines") + + // We should not error because the pipeline is being deployed with the same + // identity as the bundle run_as identity. + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) +} + +func TestRunAsErrorForModelServing(t *testing.T) { + b := load(t, "./run_as/not_allowed/model_serving") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/model_serving/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("model_serving_endpoints are not supported when the current deployment user is different from the bundle's run_as identity. Please deploy as the run_as identity. Please refer to the documentation at https://docs.databricks.com/dev-tools/bundles/run-as.html for more details. Location of the unsupported resource: %s:14:5. Current identity: jane@doe.com. Run as identity: my_service_principal", configPath)) +} + +func TestRunAsNoErrorForModelServingEndpoints(t *testing.T) { + b := load(t, "./run_as/not_allowed/model_serving") + + // We should not error because the model serving endpoint is being deployed + // with the same identity as the bundle run_as identity. 
+ ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) +} + +func TestRunAsErrorWhenBothUserAndSpSpecified(t *testing.T) { + b := load(t, "./run_as/not_allowed/both_sp_and_user") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/both_sp_and_user/databricks.yml") + assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. A service_principal_name \"my_service_principal\" is specified at %s:6:27. A user_name \"my_user_name\" is defined at %s:7:14", configPath, configPath)) +} + +func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) { + tcases := []struct { + name string + err string + }{ + { + name: "empty_run_as", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:8", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_run_as/databricks.yml")), + }, + { + name: "empty_sp", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_sp/databricks.yml")), + }, + { + name: "empty_user", + err: fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user/databricks.yml")), + }, + { + name: "empty_user_and_sp", + err: fmt.Sprintf("run_as section must specify exactly one identity. 
Neither service_principal_name nor user_name is specified at %s:5:3", filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/empty_user_and_sp/databricks.yml")), + }, + } + + for _, tc := range tcases { + t.Run(tc.name, func(t *testing.T) { + + bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name) + b := load(t, bundlePath) + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + assert.EqualError(t, err, tc.err) + }) + } +} + +func TestRunAsErrorNeitherUserOrSpSpecifiedAtTargetOverride(t *testing.T) { + b := loadTarget(t, "./run_as/not_allowed/neither_sp_nor_user/override", "development") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "my_service_principal", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + err := diags.Error() + + configPath := filepath.FromSlash("run_as/not_allowed/neither_sp_nor_user/override/override.yml") + assert.EqualError(t, err, fmt.Sprintf("run_as section must specify exactly one identity. Neither service_principal_name nor user_name is specified at %s:4:12", configPath)) +} + +func TestLegacyRunAs(t *testing.T) { + b := load(t, "./run_as/legacy") + + ctx := context.Background() + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &config.User{ + User: &iam.User{ + UserName: "jane@doe.com", + }, + } + return nil + }) + + diags := bundle.Apply(ctx, b, mutator.SetRunAs()) + assert.NoError(t, diags.Error()) + + assert.Len(t, b.Config.Resources.Jobs, 3) + jobs := b.Config.Resources.Jobs + + // job_one and job_two should have the same run_as identity as the bundle. + assert.NotNil(t, jobs["job_one"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_one"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_one"].RunAs.UserName) + + assert.NotNil(t, jobs["job_two"].RunAs) + assert.Equal(t, "my_service_principal", jobs["job_two"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_two"].RunAs.UserName) + + // job_three should retain its run_as identity. + assert.NotNil(t, jobs["job_three"].RunAs) + assert.Equal(t, "my_service_principal_for_job", jobs["job_three"].RunAs.ServicePrincipalName) + assert.Equal(t, "", jobs["job_three"].RunAs.UserName) + + // Assert owner permissions for pipelines are set. 
pipelines := b.Config.Resources.Pipelines assert.Len(t, pipelines["nyc_taxi_pipeline"].Permissions, 2) - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].Level, "CAN_VIEW") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[0].ServicePrincipalName, "my_service_principal") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].Level, "IS_OWNER") - assert.Equal(t, pipelines["nyc_taxi_pipeline"].Permissions[1].UserName, "my_user_name") + assert.Equal(t, "CAN_VIEW", pipelines["nyc_taxi_pipeline"].Permissions[0].Level) + assert.Equal(t, "my_user_name", pipelines["nyc_taxi_pipeline"].Permissions[0].UserName) + + assert.Equal(t, "IS_OWNER", pipelines["nyc_taxi_pipeline"].Permissions[1].Level) + assert.Equal(t, "my_service_principal", pipelines["nyc_taxi_pipeline"].Permissions[1].ServicePrincipalName) + + // Assert other resources are not affected. + assert.Equal(t, ml.Model{Name: "skynet"}, *b.Config.Resources.Models["model_one"].Model) + assert.Equal(t, catalog.CreateRegisteredModelRequest{Name: "skynet (in UC)"}, *b.Config.Resources.RegisteredModels["model_two"].CreateRegisteredModelRequest) + assert.Equal(t, ml.Experiment{Name: "experiment_one"}, *b.Config.Resources.Experiments["experiment_one"].Experiment) + assert.Equal(t, serving.CreateServingEndpoint{Name: "skynet"}, *b.Config.Resources.ModelServingEndpoints["model_serving_one"].CreateServingEndpoint) } diff --git a/bundle/tests/suggest_target_test.go b/bundle/tests/suggest_target_test.go index 924d6a4e1..8fb130409 100644 --- a/bundle/tests/suggest_target_test.go +++ b/bundle/tests/suggest_target_test.go @@ -4,14 +4,19 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/cmd/root" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/databricks/cli/internal" - "github.com/stretchr/testify/require" ) func TestSuggestTargetIfWrongPassed(t *testing.T) { t.Setenv("BUNDLE_ROOT", filepath.Join("target_overrides", "workspace")) - _, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") - require.ErrorContains(t, err, "Available targets:") - require.ErrorContains(t, err, "development") - require.ErrorContains(t, err, "staging") + stdoutBytes, _, err := internal.RequireErrorRun(t, "bundle", "validate", "-e", "incorrect") + stdout := stdoutBytes.String() + + assert.Error(t, root.ErrAlreadyPrinted, err) + assert.Contains(t, stdout, "Available targets:") + assert.Contains(t, stdout, "development") + assert.Contains(t, stdout, "staging") } diff --git a/bundle/tests/sync/nil/databricks.yml b/bundle/tests/sync/nil/databricks.yml new file mode 100644 index 000000000..a8b4b901e --- /dev/null +++ b/bundle/tests/sync/nil/databricks.yml @@ -0,0 +1,19 @@ +bundle: + name: sync_nil + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: + include: ~ + exclude: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/sync/nil_root/databricks.yml b/bundle/tests/sync/nil_root/databricks.yml new file mode 100644 index 000000000..44e6c48ea --- /dev/null +++ b/bundle/tests/sync/nil_root/databricks.yml @@ -0,0 +1,17 @@ +bundle: + name: sync_nil_root + +workspace: + host: https://acme.cloud.databricks.com/ + +sync: ~ + +targets: + development: + + staging: + sync: + include: + - tests/* + exclude: + - dist diff --git a/bundle/tests/override_sync/databricks.yml b/bundle/tests/sync/override/databricks.yml similarity index 93% rename from bundle/tests/override_sync/databricks.yml rename to 
bundle/tests/sync/override/databricks.yml index 1417b8644..8bb0e1def 100644 --- a/bundle/tests/override_sync/databricks.yml +++ b/bundle/tests/sync/override/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/override_sync_no_root/databricks.yml b/bundle/tests/sync/override_no_root/databricks.yml similarity index 90% rename from bundle/tests/override_sync_no_root/databricks.yml rename to bundle/tests/sync/override_no_root/databricks.yml index 109d8da1f..bd1bfe8e0 100644 --- a/bundle/tests/override_sync_no_root/databricks.yml +++ b/bundle/tests/sync/override_no_root/databricks.yml @@ -1,5 +1,5 @@ bundle: - name: override_sync + name: sync_override_no_root workspace: host: https://acme.cloud.databricks.com/ diff --git a/bundle/tests/sync_include_exclude_no_matches_test.go b/bundle/tests/sync_include_exclude_no_matches_test.go new file mode 100644 index 000000000..94cedbaa6 --- /dev/null +++ b/bundle/tests/sync_include_exclude_no_matches_test.go @@ -0,0 +1,39 @@ +package config_tests + +import ( + "context" + "fmt" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" + "github.com/databricks/cli/libs/diag" + "github.com/stretchr/testify/require" +) + +func TestSyncIncludeExcludeNoMatchesTest(t *testing.T) { + b := loadTarget(t, "./sync/override", "development") + + diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), validate.ValidateSyncPatterns()) + require.Len(t, diags, 3) + require.NoError(t, diags.Error()) + + require.Equal(t, diags[0].Severity, diag.Warning) + require.Equal(t, diags[0].Summary, "Pattern dist does not match any files") + require.Equal(t, diags[0].Location.File, filepath.Join("sync", "override", "databricks.yml")) + require.Equal(t, diags[0].Location.Line, 17) + require.Equal(t, diags[0].Location.Column, 11) + require.Equal(t, diags[0].Path.String(), "sync.exclude[0]") + + summaries := []string{ + fmt.Sprintf("Pattern %s does not match any files", filepath.Join("src", "*")), + fmt.Sprintf("Pattern %s does not match any files", filepath.Join("tests", "*")), + } + + require.Equal(t, diags[1].Severity, diag.Warning) + require.Contains(t, summaries, diags[1].Summary) + + require.Equal(t, diags[2].Severity, diag.Warning) + require.Contains(t, summaries, diags[2].Summary) +} diff --git a/bundle/tests/sync_test.go b/bundle/tests/sync_test.go new file mode 100644 index 000000000..d08e889c3 --- /dev/null +++ b/bundle/tests/sync_test.go @@ -0,0 +1,65 @@ +package config_tests + +import ( + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle" + "github.com/stretchr/testify/assert" +) + +func TestSyncOverride(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*"), filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override", "prod") + assert.ElementsMatch(t, []string{filepath.FromSlash("src/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncOverrideNoRootSync(t 
*testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/override_no_root", "development") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("fixtures/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/override_no_root", "prod") + assert.ElementsMatch(t, []string{}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{}, b.Config.Sync.Exclude) +} + +func TestSyncNil(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} + +func TestSyncNilRoot(t *testing.T) { + var b *bundle.Bundle + + b = loadTarget(t, "./sync/nil_root", "development") + assert.Nil(t, b.Config.Sync.Include) + assert.Nil(t, b.Config.Sync.Exclude) + + b = loadTarget(t, "./sync/nil_root", "staging") + assert.ElementsMatch(t, []string{filepath.FromSlash("tests/*")}, b.Config.Sync.Include) + assert.ElementsMatch(t, []string{filepath.FromSlash("dist")}, b.Config.Sync.Exclude) +} diff --git a/bundle/tests/undefined_job/databricks.yml b/bundle/tests/undefined_job/databricks.yml new file mode 100644 index 000000000..12c19f946 --- /dev/null +++ b/bundle/tests/undefined_job/databricks.yml @@ -0,0 +1,8 @@ +bundle: + name: undefined-job + +resources: + jobs: + undefined: + test: + name: "Test Job" diff --git a/bundle/tests/undefined_job_test.go b/bundle/tests/undefined_job_test.go new file mode 100644 index 000000000..ed502c471 --- /dev/null +++ b/bundle/tests/undefined_job_test.go @@ -0,0 +1,12 @@ +package config_tests + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestUndefinedJobLoadsWithError(t *testing.T) { + _, diags := loadTargetWithDiags("./undefined_job", "default") + assert.ErrorContains(t, diags.Error(), "job undefined is not defined") +} diff --git a/bundle/tests/variables/complex/databricks.yml b/bundle/tests/variables/complex/databricks.yml new file mode 100644 index 000000000..ca27f606d --- /dev/null +++ b/bundle/tests/variables/complex/databricks.yml @@ -0,0 +1,51 @@ +bundle: + name: complex-variables + +resources: + jobs: + my_job: + job_clusters: + - job_cluster_key: key + new_cluster: ${var.cluster} + tasks: + - task_key: test + job_cluster_key: key + libraries: ${variables.libraries.value} + task_key: "task with spark version ${var.cluster.spark_version} and jar ${var.libraries[0].jar}" + +variables: + node_type: + default: "Standard_DS3_v2" + cluster: + type: complex + description: "A cluster definition" + default: + spark_version: "13.2.x-scala2.11" + node_type_id: ${var.node_type} + num_workers: 2 + policy_id: "some-policy-id" + spark_conf: + spark.speculation: true + spark.databricks.delta.retentionDurationCheck.enabled: false + spark.random: true + libraries: + type: complex + description: "A libraries definition" + default: + - jar: "/path/to/jar" + - egg: "/path/to/egg" + - whl: "/path/to/whl" + + +targets: + default: + dev: + variables: + node_type: "Standard_DS3_v3" + cluster: + spark_version: "14.2.x-scala2.11" + 
node_type_id: ${var.node_type} + num_workers: 4 + spark_conf: + spark.speculation: false + spark.databricks.delta.retentionDurationCheck.enabled: false diff --git a/bundle/tests/variables/empty/databricks.yml b/bundle/tests/variables/empty/databricks.yml new file mode 100644 index 000000000..f90f6211c --- /dev/null +++ b/bundle/tests/variables/empty/databricks.yml @@ -0,0 +1,7 @@ +variables: + a: + description: empty variable + default: + +bundle: + name: empty${var.a} diff --git a/bundle/tests/variables/env_overrides/databricks.yml b/bundle/tests/variables/env_overrides/databricks.yml index e8adb9566..560513bc3 100644 --- a/bundle/tests/variables/env_overrides/databricks.yml +++ b/bundle/tests/variables/env_overrides/databricks.yml @@ -8,14 +8,16 @@ variables: d: description: variable with lookup - lookup: - cluster: some-cluster + default: "" e: description: variable with lookup - lookup: - instance_pool: some-pool + default: "some-value" + f: + description: variable with lookup + lookup: + cluster_policy: wrong-cluster-policy bundle: name: test bundle @@ -49,4 +51,7 @@ targets: e: lookup: instance_pool: some-test-instance-pool + f: + lookup: + cluster_policy: some-test-cluster-policy b: prod-b diff --git a/bundle/tests/variables/variable_overrides_in_target/databricks.yml b/bundle/tests/variables/variable_overrides_in_target/databricks.yml new file mode 100644 index 000000000..4e52b5073 --- /dev/null +++ b/bundle/tests/variables/variable_overrides_in_target/databricks.yml @@ -0,0 +1,41 @@ +bundle: + name: foobar + +resources: + pipelines: + my_pipeline: + name: ${var.foo} + continuous: ${var.baz} + clusters: + - num_workers: ${var.bar} + + + +variables: + foo: + default: "a_string" + description: "A string variable" + + bar: + default: 42 + description: "An integer variable" + + baz: + default: true + description: "A boolean variable" + +targets: + use-default-variable-values: + + override-string-variable: + variables: + foo: "overridden_string" + + override-int-variable: + variables: + bar: 43 + + override-both-bool-and-string-variables: + variables: + foo: "overridden_string" + baz: false diff --git a/bundle/tests/variables_test.go b/bundle/tests/variables_test.go index 91e165b15..51a23e5d5 100644 --- a/bundle/tests/variables_test.go +++ b/bundle/tests/variables_test.go @@ -5,115 +5,197 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/interpolation" "github.com/databricks/cli/bundle/config/mutator" - "github.com/databricks/cli/bundle/config/variable" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/mock" "github.com/stretchr/testify/require" ) func TestVariables(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "def") b := load(t, "./variables/vanilla") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - require.NoError(t, err) + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) assert.Equal(t, "abc def", b.Config.Bundle.Name) } func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) { b := load(t, "./variables/vanilla") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( 
mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") + mutator.ResolveVariableReferences( + "variables", + ), + )) + assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } func TestVariablesTargetsBlockOverride(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-single-variable-override"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - require.NoError(t, err) + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideForMultipleVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - require.NoError(t, err) + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideWithProcessEnvVars(t *testing.T) { t.Setenv("BUNDLE_VAR_b", "env-var-b") b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-with-two-variable-overrides"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - require.NoError(t, err) + mutator.ResolveVariableReferences( + "variables", + ), + )) + require.NoError(t, diags.Error()) assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile) } func TestVariablesTargetsBlockOverrideWithMissingVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-missing-a-required-variable-assignment"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") + mutator.ResolveVariableReferences( + "variables", + ), + )) + assert.ErrorContains(t, diags.Error(), "no value assigned to required variable b. 
Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable") } func TestVariablesTargetsBlockOverrideWithUndefinedVariables(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-using-an-undefined-variable"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - assert.ErrorContains(t, err, "variable c is not defined but is assigned a value") + mutator.ResolveVariableReferences( + "variables", + ), + )) + assert.ErrorContains(t, diags.Error(), "variable c is not defined but is assigned a value") } func TestVariablesWithoutDefinition(t *testing.T) { t.Setenv("BUNDLE_VAR_a", "foo") t.Setenv("BUNDLE_VAR_b", "bar") b := load(t, "./variables/without_definition") - err := bundle.Apply(context.Background(), b, mutator.SetVariables()) - require.NoError(t, err) + diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.NoError(t, diags.Error()) require.True(t, b.Config.Variables["a"].HasValue()) require.True(t, b.Config.Variables["b"].HasValue()) - assert.Equal(t, "foo", *b.Config.Variables["a"].Value) - assert.Equal(t, "bar", *b.Config.Variables["b"].Value) + assert.Equal(t, "foo", b.Config.Variables["a"].Value) + assert.Equal(t, "bar", b.Config.Variables["b"].Value) } func TestVariablesWithTargetLookupOverrides(t *testing.T) { b := load(t, "./variables/env_overrides") - err := bundle.Apply(context.Background(), b, bundle.Seq( + + mockWorkspaceClient := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(mockWorkspaceClient.WorkspaceClient) + instancePoolApi := mockWorkspaceClient.GetMockInstancePoolsAPI() + instancePoolApi.EXPECT().GetByInstancePoolName(mock.Anything, "some-test-instance-pool").Return(&compute.InstancePoolAndStats{ + InstancePoolId: "1234", + }, nil) + + clustersApi := mockWorkspaceClient.GetMockClustersAPI() + clustersApi.EXPECT().GetByClusterName(mock.Anything, "some-test-cluster").Return(&compute.ClusterDetails{ + ClusterId: "4321", + }, nil) + + clusterPoliciesApi := mockWorkspaceClient.GetMockClusterPoliciesAPI() + clusterPoliciesApi.EXPECT().GetByName(mock.Anything, "some-test-cluster-policy").Return(&compute.Policy{ + PolicyId: "9876", + }, nil) + + diags := bundle.Apply(context.Background(), b, bundle.Seq( mutator.SelectTarget("env-overrides-lookup"), mutator.SetVariables(), - interpolation.Interpolate( - interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix), - ))) - require.NoError(t, err) - assert.Equal(t, "cluster: some-test-cluster", b.Config.Variables["d"].Lookup.String()) - assert.Equal(t, "instance-pool: some-test-instance-pool", b.Config.Variables["e"].Lookup.String()) + mutator.ResolveResourceReferences(), + )) + + require.NoError(t, diags.Error()) + assert.Equal(t, "4321", b.Config.Variables["d"].Value) + assert.Equal(t, "1234", b.Config.Variables["e"].Value) + assert.Equal(t, "9876", b.Config.Variables["f"].Value) +} + +func TestVariableTargetOverrides(t *testing.T) { + var tcases = []struct { + targetName string + pipelineName string + pipelineContinuous bool + pipelineNumWorkers int + }{ + { + "use-default-variable-values", + "a_string", + true, + 42, + }, + { + "override-string-variable", + "overridden_string", + true, + 42, + }, + { + "override-int-variable", + "a_string", + true, + 43, + }, + { + "override-both-bool-and-string-variables", + 
"overridden_string", + false, + 42, + }, + } + + for _, tcase := range tcases { + t.Run(tcase.targetName, func(t *testing.T) { + b := loadTarget(t, "./variables/variable_overrides_in_target", tcase.targetName) + diags := bundle.Apply(context.Background(), b, bundle.Seq( + mutator.SetVariables(), + mutator.ResolveVariableReferences("variables")), + ) + require.NoError(t, diags.Error()) + + assert.Equal(t, tcase.pipelineName, b.Config.Resources.Pipelines["my_pipeline"].Name) + assert.Equal(t, tcase.pipelineContinuous, b.Config.Resources.Pipelines["my_pipeline"].Continuous) + assert.Equal(t, tcase.pipelineNumWorkers, b.Config.Resources.Pipelines["my_pipeline"].Clusters[0].NumWorkers) + }) + } +} + +func TestBundleWithEmptyVariableLoads(t *testing.T) { + b := load(t, "./variables/empty") + diags := bundle.Apply(context.Background(), b, mutator.SetVariables()) + require.ErrorContains(t, diags.Error(), "no value assigned to required variable a") } diff --git a/bundle/tests/yaml_anchors_separate_block/databricks.yml b/bundle/tests/yaml_anchors_separate_block/databricks.yml new file mode 100644 index 000000000..447d5d0bb --- /dev/null +++ b/bundle/tests/yaml_anchors_separate_block/databricks.yml @@ -0,0 +1,15 @@ +bundle: + name: yaml_anchors_separate_block + +tags: &custom_tags + Tag1: "Value1" + Tag2: "Value2" + Tag3: "Value3" + +resources: + jobs: + my_job: + tasks: + - task_key: yaml_anchors_separate_block + tags: + <<: *custom_tags diff --git a/bundle/tests/yaml_anchors_test.go b/bundle/tests/yaml_anchors_test.go index 95cec30ad..5c8497051 100644 --- a/bundle/tests/yaml_anchors_test.go +++ b/bundle/tests/yaml_anchors_test.go @@ -19,6 +19,18 @@ func TestYAMLAnchors(t *testing.T) { require.NotNil(t, t0) require.NotNil(t, t1) + require.NotNil(t, t0.NewCluster) + require.NotNil(t, t1.NewCluster) assert.Equal(t, "10.4.x-scala2.12", t0.NewCluster.SparkVersion) assert.Equal(t, "10.4.x-scala2.12", t1.NewCluster.SparkVersion) } + +func TestYAMLAnchorsNoWarnings(t *testing.T) { + _, diags := loadTargetWithDiags("./yaml_anchors", "default") + assert.Empty(t, diags) +} + +func TestYAMLAnchorsSeparateBlockNoWarnings(t *testing.T) { + _, diags := loadTargetWithDiags("./yaml_anchors_separate_block", "default") + assert.Empty(t, diags) +} diff --git a/cmd/account/access-control/access-control.go b/cmd/account/access-control/access-control.go index 36b69d01d..f6761a1b4 100755 --- a/cmd/account/access-control/access-control.go +++ b/cmd/account/access-control/access-control.go @@ -29,6 +29,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetAssignableRolesForResource()) + cmd.AddCommand(newGetRuleSet()) + cmd.AddCommand(newUpdateRuleSet()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -67,7 +72,7 @@ func newGetAssignableRolesForResource() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -97,12 +102,6 @@ func newGetAssignableRolesForResource() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAssignableRolesForResource()) - }) -} - // start get-rule-set command // Slice with functions to override default command behavior. 
@@ -141,7 +140,7 @@ func newGetRuleSet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -172,12 +171,6 @@ func newGetRuleSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRuleSet()) - }) -} - // start update-rule-set command // Slice with functions to override default command behavior. @@ -239,10 +232,4 @@ func newUpdateRuleSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateRuleSet()) - }) -} - // end service AccountAccessControl diff --git a/cmd/account/billable-usage/billable-usage.go b/cmd/account/billable-usage/billable-usage.go index ec9b7a639..2a2cca605 100755 --- a/cmd/account/billable-usage/billable-usage.go +++ b/cmd/account/billable-usage/billable-usage.go @@ -25,6 +25,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDownload()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -75,7 +78,7 @@ func newDownload() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -92,7 +95,7 @@ func newDownload() *cobra.Command { return err } defer response.Contents.Close() - return cmdio.RenderReader(ctx, response.Contents) + return cmdio.Render(ctx, response.Contents) } // Disable completions since they are not applicable. @@ -107,10 +110,4 @@ func newDownload() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDownload()) - }) -} - // end service BillableUsage diff --git a/cmd/account/budgets/budgets.go b/cmd/account/budgets/budgets.go index 69237900e..82f7b9f01 100755 --- a/cmd/account/budgets/budgets.go +++ b/cmd/account/budgets/budgets.go @@ -31,6 +31,13 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -98,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -174,12 +175,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -251,12 +246,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -281,11 +270,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Budgets.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Budgets.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -300,12 +286,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -337,7 +317,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -375,10 +355,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Budgets diff --git a/cmd/account/credentials/credentials.go b/cmd/account/credentials/credentials.go index 72fcd70bd..ed071cda3 100755 --- a/cmd/account/credentials/credentials.go +++ b/cmd/account/credentials/credentials.go @@ -31,6 +31,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -111,12 +117,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -189,12 +189,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +260,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -315,10 +303,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Credentials diff --git a/cmd/account/csp-enablement-account/csp-enablement-account.go b/cmd/account/csp-enablement-account/csp-enablement-account.go new file mode 100755 index 000000000..d6fce9537 --- /dev/null +++ b/cmd/account/csp-enablement-account/csp-enablement-account.go @@ -0,0 +1,159 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package csp_enablement_account + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "csp-enablement-account", + Short: `The compliance security profile settings at the account level control whether to enable it for new workspaces.`, + Long: `The compliance security profile settings at the account level control whether + to enable it for new workspaces. By default, this account-level setting is + disabled for new workspaces. After workspace creation, account admins can + enable the compliance security profile individually for each workspace. + + This settings can be disabled so that new workspaces do not have compliance + security profile enabled by default.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetCspEnablementAccountSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetCspEnablementAccountSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the compliance security profile setting for new workspaces.` + cmd.Long = `Get the compliance security profile setting for new workspaces. + + Gets the compliance security profile setting for new workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.CspEnablementAccount().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateCspEnablementAccountSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateCspEnablementAccountSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the compliance security profile setting for new workspaces.` + cmd.Long = `Update the compliance security profile setting for new workspaces. 
+ + Updates the value of the compliance security profile setting for new + workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.CspEnablementAccount().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service CspEnablementAccount diff --git a/cmd/account/custom-app-integration/custom-app-integration.go b/cmd/account/custom-app-integration/custom-app-integration.go index e6d216dfc..ca9f69a35 100755 --- a/cmd/account/custom-app-integration/custom-app-integration.go +++ b/cmd/account/custom-app-integration/custom-app-integration.go @@ -29,6 +29,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -138,7 +139,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -168,12 +169,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -202,7 +197,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -232,12 +227,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -262,11 +251,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.CustomAppIntegration.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.CustomAppIntegration.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -281,12 +267,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -321,7 +301,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -357,10 +337,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service CustomAppIntegration diff --git a/cmd/account/encryption-keys/encryption-keys.go b/cmd/account/encryption-keys/encryption-keys.go index 3977f5837..44545ccfa 100755 --- a/cmd/account/encryption-keys/encryption-keys.go +++ b/cmd/account/encryption-keys/encryption-keys.go @@ -42,6 +42,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -128,12 +134,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -163,7 +163,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -193,12 +193,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -241,7 +235,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -271,12 +265,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -331,10 +319,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service EncryptionKeys diff --git a/cmd/account/esm-enablement-account/esm-enablement-account.go b/cmd/account/esm-enablement-account/esm-enablement-account.go new file mode 100755 index 000000000..71149e5ad --- /dev/null +++ b/cmd/account/esm-enablement-account/esm-enablement-account.go @@ -0,0 +1,157 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package esm_enablement_account + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "esm-enablement-account", + Short: `The enhanced security monitoring setting at the account level controls whether to enable the feature on new workspaces.`, + Long: `The enhanced security monitoring setting at the account level controls whether + to enable the feature on new workspaces. By default, this account-level + setting is disabled for new workspaces. After workspace creation, account + admins can enable enhanced security monitoring individually for each + workspace.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetEsmEnablementAccountSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetEsmEnablementAccountSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enhanced security monitoring setting for new workspaces.` + cmd.Long = `Get the enhanced security monitoring setting for new workspaces. + + Gets the enhanced security monitoring setting for new workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.EsmEnablementAccount().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateEsmEnablementAccountSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateEsmEnablementAccountSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enhanced security monitoring setting for new workspaces.` + cmd.Long = `Update the enhanced security monitoring setting for new workspaces. 
+ + Updates the value of the enhanced security monitoring setting for new + workspaces.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.EsmEnablementAccount().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service EsmEnablementAccount diff --git a/cmd/account/groups/groups.go b/cmd/account/groups/groups.go index ed1fa1642..a7e1ac430 100755 --- a/cmd/account/groups/groups.go +++ b/cmd/account/groups/groups.go @@ -33,6 +33,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -79,7 +87,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -114,12 +122,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -190,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -305,7 +295,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -314,11 +304,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Groups.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Groups.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -333,12 +320,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. 
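Across these files, subcommand registration moves out of per-command init() functions and into New() itself via direct cmd.AddCommand calls; the cmdOverrides and per-command override slices remain only as hooks for manually curated files in each directory, as the generated comments state. A hypothetical override.go (illustrative only, not part of this diff; the package name and the tweak are assumptions) would still attach through those slices:

    package groups

    import "github.com/spf13/cobra"

    // Manually curated override: appended to cmdOverrides, so it runs after
    // the generated New() has built the command and added its subcommands.
    func init() {
        cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
            cmd.Aliases = []string{"grp"} // hypothetical alias, for illustration only
        })
    }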
@@ -420,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -514,10 +489,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountGroups diff --git a/cmd/account/ip-access-lists/ip-access-lists.go b/cmd/account/ip-access-lists/ip-access-lists.go index 20511265d..5c6d27dd7 100755 --- a/cmd/account/ip-access-lists/ip-access-lists.go +++ b/cmd/account/ip-access-lists/ip-access-lists.go @@ -48,6 +48,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -108,13 +116,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -158,12 +166,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -234,12 +236,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -310,12 +306,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -339,11 +329,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.IpAccessLists.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.IpAccessLists.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -358,12 +345,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -414,13 +395,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. 
Provide 'label', 'list_type', 'enabled' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -471,12 +452,6 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -573,10 +548,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountIpAccessLists diff --git a/cmd/account/log-delivery/log-delivery.go b/cmd/account/log-delivery/log-delivery.go index 1846e0fdc..4584f4d2b 100755 --- a/cmd/account/log-delivery/log-delivery.go +++ b/cmd/account/log-delivery/log-delivery.go @@ -85,6 +85,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatchStatus()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -146,7 +152,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -181,12 +187,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -258,12 +258,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -294,7 +288,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -303,11 +297,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.LogDelivery.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.LogDelivery.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -322,12 +313,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch-status command // Slice with functions to override default command behavior. @@ -368,13 +353,13 @@ func newPatchStatus() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only LOG_DELIVERY_CONFIGURATION_ID as positional arguments. 
Provide 'status' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -416,10 +401,4 @@ func newPatchStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatchStatus()) - }) -} - // end service LogDelivery diff --git a/cmd/account/metastore-assignments/metastore-assignments.go b/cmd/account/metastore-assignments/metastore-assignments.go index 619bde507..d7f32ccb9 100755 --- a/cmd/account/metastore-assignments/metastore-assignments.go +++ b/cmd/account/metastore-assignments/metastore-assignments.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -68,7 +75,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -108,12 +115,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -144,7 +145,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -178,12 +179,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -215,7 +210,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -248,12 +243,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -283,7 +272,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -294,11 +283,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.MetastoreAssignments.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.MetastoreAssignments.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -313,12 +299,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
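The update commands for the account-level settings added in this diff (csp-enablement-account and esm-enablement-account above, personal-compute further down) take their request body exclusively through --json: the flags.JsonFlag is unmarshalled into the request struct, and the command fails if the flag is absent. The shape of that RunE branch, mirroring the generated code in this diff:

    if cmd.Flags().Changed("json") {
        // --json accepts an inline JSON string or @path/to/file.json.
        err = updateJson.Unmarshal(&updateReq)
        if err != nil {
            return err
        }
    } else {
        return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
    }

    response, err := a.Settings.EsmEnablementAccount().Update(ctx, updateReq)
    if err != nil {
        return err
    }
    return cmdio.Render(ctx, response)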
@@ -353,7 +333,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -393,10 +373,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountMetastoreAssignments diff --git a/cmd/account/metastores/metastores.go b/cmd/account/metastores/metastores.go index 797bef5ec..7c8e3f2c1 100755 --- a/cmd/account/metastores/metastores.go +++ b/cmd/account/metastores/metastores.go @@ -26,6 +26,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -63,7 +70,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -98,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -134,7 +135,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -164,12 +165,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -198,7 +193,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -228,12 +223,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -257,11 +246,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Metastores.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Metastores.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -276,12 +262,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -314,7 +294,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -350,10 +330,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountMetastores diff --git a/cmd/account/network-connectivity/network-connectivity.go b/cmd/account/network-connectivity/network-connectivity.go index 27ab31743..cd8da2905 100755 --- a/cmd/account/network-connectivity/network-connectivity.go +++ b/cmd/account/network-connectivity/network-connectivity.go @@ -21,20 +21,23 @@ func New() *cobra.Command { Use: "network-connectivity", Short: `These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources.`, Long: `These APIs provide configurations for the network connectivity of your - workspaces for serverless compute resources. This API provides stable subnets - for your workspace so that you can configure your firewalls on your Azure - Storage accounts to allow access from Databricks. You can also use the API to - provision private endpoints for Databricks to privately connect serverless - compute resources to your Azure resources using Azure Private Link. See - [configure serverless secure connectivity]. - - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security`, + workspaces for serverless compute resources.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, } + // Add methods + cmd.AddCommand(newCreateNetworkConnectivityConfiguration()) + cmd.AddCommand(newCreatePrivateEndpointRule()) + cmd.AddCommand(newDeleteNetworkConnectivityConfiguration()) + cmd.AddCommand(newDeletePrivateEndpointRule()) + cmd.AddCommand(newGetNetworkConnectivityConfiguration()) + cmd.AddCommand(newGetPrivateEndpointRule()) + cmd.AddCommand(newListNetworkConnectivityConfigurations()) + cmd.AddCommand(newListPrivateEndpointRules()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -64,41 +67,26 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { cmd.Use = "create-network-connectivity-configuration NAME REGION" cmd.Short = `Create a network connectivity configuration.` cmd.Long = `Create a network connectivity configuration. - - Creates a network connectivity configuration (NCC), which provides stable - Azure service subnets when accessing your Azure Storage accounts. You can also - use a network connectivity configuration to create Databricks-managed private - endpoints so that Databricks serverless compute resources privately access - your resources. - - **IMPORTANT**: After you create the network connectivity configuration, you - must assign one or more workspaces to the new network connectivity - configuration. You can share one network connectivity configuration with - multiple workspaces from the same Azure region within the same Databricks - account. See [configure serverless secure connectivity]. - - [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security Arguments: NAME: The name of the network connectivity configuration. The name can contain alphanumeric characters, hyphens, and underscores. 
The length must be between 3 and 30 characters. The name must match the regular expression ^[0-9a-zA-Z-_]{3,30}$. - REGION: The Azure region for this network connectivity configuration. Only - workspaces in the same Azure region can be attached to this network - connectivity configuration.` + REGION: The region for the network connectivity configuration. Only workspaces in + the same region can be attached to the network connectivity configuration.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -139,12 +127,6 @@ func newCreateNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateNetworkConnectivityConfiguration()) - }) -} - // start create-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -189,13 +171,13 @@ func newCreatePrivateEndpointRule() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only NETWORK_CONNECTIVITY_CONFIG_ID as positional arguments. Provide 'resource_id', 'group_id' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -240,12 +222,6 @@ func newCreatePrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreatePrivateEndpointRule()) - }) -} - // start delete-network-connectivity-configuration command // Slice with functions to override default command behavior. @@ -274,7 +250,7 @@ func newDeleteNetworkConnectivityConfiguration() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -304,12 +280,6 @@ func newDeleteNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteNetworkConnectivityConfiguration()) - }) -} - // start delete-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -330,11 +300,12 @@ func newDeletePrivateEndpointRule() *cobra.Command { cmd.Short = `Delete a private endpoint rule.` cmd.Long = `Delete a private endpoint rule. - Initiates deleting a private endpoint rule. The private endpoint will be - deactivated and will be purged after seven days of deactivation. When a - private endpoint is in deactivated state, deactivated field is set to true - and the private endpoint is not available to your serverless compute - resources. + Initiates deleting a private endpoint rule. If the connection state is PENDING + or EXPIRED, the private endpoint is immediately deleted. Otherwise, the + private endpoint is deactivated and will be deleted after seven days of + deactivation. 
When a private endpoint is deactivated, the deactivated field + is set to true and the private endpoint is not available to your serverless + compute resources. Arguments: NETWORK_CONNECTIVITY_CONFIG_ID: Your Network Connectvity Configuration ID. @@ -343,7 +314,7 @@ func newDeletePrivateEndpointRule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -374,12 +345,6 @@ func newDeletePrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeletePrivateEndpointRule()) - }) -} - // start get-network-connectivity-configuration command // Slice with functions to override default command behavior. @@ -408,7 +373,7 @@ func newGetNetworkConnectivityConfiguration() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -438,12 +403,6 @@ func newGetNetworkConnectivityConfiguration() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetNetworkConnectivityConfiguration()) - }) -} - // start get-private-endpoint-rule command // Slice with functions to override default command behavior. @@ -473,7 +432,7 @@ func newGetPrivateEndpointRule() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -504,12 +463,6 @@ func newGetPrivateEndpointRule() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPrivateEndpointRule()) - }) -} - // start list-network-connectivity-configurations command // Slice with functions to override default command behavior. @@ -537,7 +490,7 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -546,11 +499,8 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.NetworkConnectivity.ListNetworkConnectivityConfigurationsAll(ctx, listNetworkConnectivityConfigurationsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.NetworkConnectivity.ListNetworkConnectivityConfigurations(ctx, listNetworkConnectivityConfigurationsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -565,12 +515,6 @@ func newListNetworkConnectivityConfigurations() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListNetworkConnectivityConfigurations()) - }) -} - // start list-private-endpoint-rules command // Slice with functions to override default command behavior. 
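The argument validators throughout this diff swap cobra.ExactArgs for root.ExactArgs, including the branch that waives positional arguments when --json carries the request body. For create-network-connectivity-configuration, the validator shown above has this shape:

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        if cmd.Flags().Changed("json") {
            // With --json, 'name' and 'region' come from the request body,
            // so no positional arguments are accepted.
            err := root.ExactArgs(0)(cmd, args)
            if err != nil {
                return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'region' in your JSON input")
            }
            return nil
        }
        check := root.ExactArgs(2)
        return check(cmd, args)
    }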
@@ -601,7 +545,7 @@ func newListPrivateEndpointRules() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -612,11 +556,8 @@ func newListPrivateEndpointRules() *cobra.Command { listPrivateEndpointRulesReq.NetworkConnectivityConfigId = args[0] - response, err := a.NetworkConnectivity.ListPrivateEndpointRulesAll(ctx, listPrivateEndpointRulesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.NetworkConnectivity.ListPrivateEndpointRules(ctx, listPrivateEndpointRulesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -631,10 +572,4 @@ func newListPrivateEndpointRules() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPrivateEndpointRules()) - }) -} - // end service NetworkConnectivity diff --git a/cmd/account/networks/networks.go b/cmd/account/networks/networks.go index 15586bdc9..05ef0c815 100755 --- a/cmd/account/networks/networks.go +++ b/cmd/account/networks/networks.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -75,13 +81,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'network_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -119,12 +125,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -200,12 +200,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -277,12 +271,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -329,10 +317,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Networks diff --git a/cmd/account/o-auth-published-apps/o-auth-published-apps.go b/cmd/account/o-auth-published-apps/o-auth-published-apps.go index b611724d4..6573b0529 100755 --- a/cmd/account/o-auth-published-apps/o-auth-published-apps.go +++ b/cmd/account/o-auth-published-apps/o-auth-published-apps.go @@ -27,6 +27,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -63,7 +66,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -72,11 +75,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.OAuthPublishedApps.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.OAuthPublishedApps.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -91,10 +91,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service OAuthPublishedApps diff --git a/cmd/account/personal-compute/personal-compute.go b/cmd/account/personal-compute/personal-compute.go new file mode 100755 index 000000000..2a14b0b33 --- /dev/null +++ b/cmd/account/personal-compute/personal-compute.go @@ -0,0 +1,219 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package personal_compute + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "personal-compute", + Short: `The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources.`, + Long: `The Personal Compute enablement setting lets you control which users can use + the Personal Compute default policy to create compute resources. By default + all users in all workspaces have access (ON), but you can change the setting + to instead let individual workspaces configure access control (DELEGATE). + + There is only one instance of this setting per account. Since this setting has + a default value, this setting is present on all accounts even though it's + never set on a given account. Deletion reverts the value of the setting back + to the default value.`, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeletePersonalComputeSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeletePersonalComputeSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete Personal Compute setting.` + cmd.Long = `Delete Personal Compute setting. 
+ + Reverts back the Personal Compute setting value to default (ON)` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.PersonalCompute().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetPersonalComputeSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetPersonalComputeSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get Personal Compute setting.` + cmd.Long = `Get Personal Compute setting. + + Gets the value of the Personal Compute setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + response, err := a.Settings.PersonalCompute().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdatePersonalComputeSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdatePersonalComputeSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update Personal Compute setting.` + cmd.Long = `Update Personal Compute setting. 
+ + Updates the value of the Personal Compute setting.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustAccountClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + a := root.AccountClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := a.Settings.PersonalCompute().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service PersonalCompute diff --git a/cmd/account/private-access/private-access.go b/cmd/account/private-access/private-access.go index 458ff827e..d527fa64e 100755 --- a/cmd/account/private-access/private-access.go +++ b/cmd/account/private-access/private-access.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -86,13 +93,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'private_access_settings_name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -133,12 +140,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -216,12 +217,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -299,12 +294,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -348,12 +337,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -412,13 +395,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only PRIVATE_ACCESS_SETTINGS_ID as positional arguments. 
Provide 'private_access_settings_name', 'region' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -460,10 +443,4 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // end service PrivateAccess diff --git a/cmd/account/published-app-integration/published-app-integration.go b/cmd/account/published-app-integration/published-app-integration.go index d3209c670..32fed5cd0 100755 --- a/cmd/account/published-app-integration/published-app-integration.go +++ b/cmd/account/published-app-integration/published-app-integration.go @@ -27,6 +27,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -68,7 +75,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -103,12 +110,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -138,7 +139,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -168,12 +169,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -202,7 +197,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -232,12 +227,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -262,11 +251,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.PublishedAppIntegration.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.PublishedAppIntegration.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -281,12 +267,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -320,7 +300,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -356,10 +336,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service PublishedAppIntegration diff --git a/cmd/account/service-principal-secrets/service-principal-secrets.go b/cmd/account/service-principal-secrets/service-principal-secrets.go index 19d6a491d..47cfa4b08 100755 --- a/cmd/account/service-principal-secrets/service-principal-secrets.go +++ b/cmd/account/service-principal-secrets/service-principal-secrets.go @@ -38,6 +38,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -74,7 +79,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -107,12 +112,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -142,7 +141,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -176,12 +175,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -212,7 +205,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -226,11 +219,8 @@ func newList() *cobra.Command { return fmt.Errorf("invalid SERVICE_PRINCIPAL_ID: %s", args[0]) } - response, err := a.ServicePrincipalSecrets.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.ServicePrincipalSecrets.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -245,10 +235,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service ServicePrincipalSecrets diff --git a/cmd/account/service-principals/service-principals.go b/cmd/account/service-principals/service-principals.go index 80f1bf461..c86810f1d 100755 --- a/cmd/account/service-principals/service-principals.go +++ b/cmd/account/service-principals/service-principals.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -77,7 +85,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -112,12 +120,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -188,12 +190,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -265,12 +261,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -304,7 +294,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -313,11 +303,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.ServicePrincipals.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.ServicePrincipals.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -332,12 +319,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -420,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -516,10 +491,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountServicePrincipals diff --git a/cmd/account/settings/settings.go b/cmd/account/settings/settings.go index e22b9950a..a750e81e0 100755 --- a/cmd/account/settings/settings.go +++ b/cmd/account/settings/settings.go @@ -3,11 +3,11 @@ package settings import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" "github.com/spf13/cobra" + + csp_enablement_account "github.com/databricks/cli/cmd/account/csp-enablement-account" + esm_enablement_account "github.com/databricks/cli/cmd/account/esm-enablement-account" + personal_compute "github.com/databricks/cli/cmd/account/personal-compute" ) // Slice with functions to override default command behavior. 
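With the per-setting logic split into the csp-enablement-account, esm-enablement-account, and personal-compute packages added earlier in this diff, cmd/account/settings/settings.go becomes a thin parent command. Assembled from this file's hunks, the rewritten New() reduces to:

    func New() *cobra.Command {
        cmd := &cobra.Command{
            Use:     "settings",
            Short:   `Accounts Settings API allows users to manage settings at the account level.`,
            Long:    `Accounts Settings API allows users to manage settings at the account level.`,
            GroupID: "settings",
            Annotations: map[string]string{
                "package": "settings",
            },
        }

        // Mount the generated per-setting subcommands; the parent no longer
        // implements any setting itself.
        cmd.AddCommand(csp_enablement_account.New())
        cmd.AddCommand(esm_enablement_account.New())
        cmd.AddCommand(personal_compute.New())

        // Apply optional overrides to this command.
        for _, fn := range cmdOverrides {
            fn(cmd)
        }

        return cmd
    }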
@@ -16,26 +16,20 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "settings", - Short: `The Personal Compute enablement setting lets you control which users can use the Personal Compute default policy to create compute resources.`, - Long: `The Personal Compute enablement setting lets you control which users can use - the Personal Compute default policy to create compute resources. By default - all users in all workspaces have access (ON), but you can change the setting - to instead let individual workspaces configure access control (DELEGATE). - - There is only one instance of this setting per account. Since this setting has - a default value, this setting is present on all accounts even though it's - never set on a given account. Deletion reverts the value of the setting back - to the default value.`, + Use: "settings", + Short: `Accounts Settings API allows users to manage settings at the account level.`, + Long: `Accounts Settings API allows users to manage settings at the account level.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, - - // This service is being previewed; hide from help output. - Hidden: true, } + // Add subservices + cmd.AddCommand(csp_enablement_account.New()) + cmd.AddCommand(esm_enablement_account.New()) + cmd.AddCommand(personal_compute.New()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -44,215 +38,4 @@ func New() *cobra.Command { return cmd } -// start delete-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deletePersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.DeletePersonalComputeSettingRequest, -) - -func newDeletePersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var deletePersonalComputeSettingReq settings.DeletePersonalComputeSettingRequest - - // TODO: short flags - - cmd.Use = "delete-personal-compute-setting ETAG" - cmd.Short = `Delete Personal Compute setting.` - cmd.Long = `Delete Personal Compute setting. - - Reverts back the Personal Compute setting value to default (ON) - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - deletePersonalComputeSettingReq.Etag = args[0] - - response, err := a.Settings.DeletePersonalComputeSetting(ctx, deletePersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. 
- cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deletePersonalComputeSettingOverrides { - fn(cmd, &deletePersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeletePersonalComputeSetting()) - }) -} - -// start read-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var readPersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.ReadPersonalComputeSettingRequest, -) - -func newReadPersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var readPersonalComputeSettingReq settings.ReadPersonalComputeSettingRequest - - // TODO: short flags - - cmd.Use = "read-personal-compute-setting ETAG" - cmd.Short = `Get Personal Compute setting.` - cmd.Long = `Get Personal Compute setting. - - Gets the value of the Personal Compute setting. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - readPersonalComputeSettingReq.Etag = args[0] - - response, err := a.Settings.ReadPersonalComputeSetting(ctx, readPersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range readPersonalComputeSettingOverrides { - fn(cmd, &readPersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadPersonalComputeSetting()) - }) -} - -// start update-personal-compute-setting command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var updatePersonalComputeSettingOverrides []func( - *cobra.Command, - *settings.UpdatePersonalComputeSettingRequest, -) - -func newUpdatePersonalComputeSetting() *cobra.Command { - cmd := &cobra.Command{} - - var updatePersonalComputeSettingReq settings.UpdatePersonalComputeSettingRequest - var updatePersonalComputeSettingJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updatePersonalComputeSettingJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&updatePersonalComputeSettingReq.AllowMissing, "allow-missing", updatePersonalComputeSettingReq.AllowMissing, `This should always be set to true for Settings RPCs.`) - // TODO: complex arg: setting - - cmd.Use = "update-personal-compute-setting" - cmd.Short = `Update Personal Compute setting.` - cmd.Long = `Update Personal Compute setting. - - Updates the value of the Personal Compute setting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustAccountClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - a := root.AccountClient(ctx) - - if cmd.Flags().Changed("json") { - err = updatePersonalComputeSettingJson.Unmarshal(&updatePersonalComputeSettingReq) - if err != nil { - return err - } - } - - response, err := a.Settings.UpdatePersonalComputeSetting(ctx, updatePersonalComputeSettingReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updatePersonalComputeSettingOverrides { - fn(cmd, &updatePersonalComputeSettingReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePersonalComputeSetting()) - }) -} - // end service AccountSettings diff --git a/cmd/account/storage-credentials/storage-credentials.go b/cmd/account/storage-credentials/storage-credentials.go index 35b865c7f..4280ae8c3 100755 --- a/cmd/account/storage-credentials/storage-credentials.go +++ b/cmd/account/storage-credentials/storage-credentials.go @@ -25,6 +25,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -71,7 +78,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -107,12 +114,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
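The help text removed from settings.go above describes the etag-based read -> delete pattern for the Personal Compute setting. A hedged sketch of that flow, using the same account-client calls as the removed commands (this is the pre-refactor SDK surface shown above; newer SDK releases may expose these operations under nested per-setting services instead):

package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	a, err := databricks.NewAccountClient()
	if err != nil {
		log.Fatal(err)
	}

	// GET first to obtain a fresh etag for optimistic concurrency control.
	current, err := a.Settings.ReadPersonalComputeSetting(ctx, settings.ReadPersonalComputeSettingRequest{})
	if err != nil {
		log.Fatal(err)
	}

	// Pass that etag to DELETE so a concurrent writer's change is not
	// silently overwritten; deletion reverts the setting to its default (ON).
	_, err = a.Settings.DeletePersonalComputeSetting(ctx, settings.DeletePersonalComputeSettingRequest{
		Etag: current.Etag, // assumes the returned setting carries an Etag field, per the removed help text
	})
	if err != nil {
		log.Fatal(err)
	}
}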
@@ -145,7 +146,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -176,12 +177,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -213,7 +208,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -244,12 +239,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -279,7 +268,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -290,11 +279,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := a.StorageCredentials.List(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.StorageCredentials.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -309,12 +295,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -350,7 +330,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -387,10 +367,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountStorageCredentials diff --git a/cmd/account/storage/storage.go b/cmd/account/storage/storage.go index d671355d6..50460ed0a 100755 --- a/cmd/account/storage/storage.go +++ b/cmd/account/storage/storage.go @@ -32,6 +32,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -108,12 +114,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -185,12 +185,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
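Another pattern repeated across the list commands above (service principals, storage credentials, users, workspace assignment): instead of materializing every page with ListAll and rendering the slice, the handler now returns the SDK's paginated iterator and streams it through cmdio.RenderIterator. The handler shape, reproduced in isolation from the storage-credentials hunk (a sketch of the generated code, not a new API):

// Inside newList(), after listReq has been populated from args/flags:
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
	ctx := cmd.Context()
	a := root.AccountClient(ctx)

	// List returns a lazy iterator; pages are fetched as the renderer
	// consumes them, so large result sets are not buffered in memory.
	response := a.StorageCredentials.List(ctx, listReq)
	return cmdio.RenderIterator(ctx, response)
}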
@@ -261,12 +255,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -310,10 +298,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Storage diff --git a/cmd/account/users/users.go b/cmd/account/users/users.go index 551766e88..289d2972f 100755 --- a/cmd/account/users/users.go +++ b/cmd/account/users/users.go @@ -37,6 +37,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -85,7 +93,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -120,12 +128,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -197,12 +199,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +277,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -320,7 +310,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -329,11 +319,8 @@ func newList() *cobra.Command { ctx := cmd.Context() a := root.AccountClient(ctx) - response, err := a.Users.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.Users.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -348,12 +335,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -436,12 +417,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. 
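The diffs above also swap cobra.ExactArgs for root.ExactArgs in every Args validator. The implementation of root.ExactArgs is not part of this diff; as a rough sketch, a drop-in wrapper with the same contract but room for a CLI-specific error message might look like this (hypothetical, for illustration only):

package root

import (
	"fmt"

	"github.com/spf13/cobra"
)

// ExactArgs mirrors cobra.ExactArgs: it accepts exactly n positional
// arguments and returns an error otherwise.
func ExactArgs(n int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) != n {
			return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
		}
		return nil
	}
}

Because it returns a cobra.PositionalArgs, it composes exactly like the upstream helper, e.g. check := root.ExactArgs(1); return check(cmd, args).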
@@ -533,10 +508,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service AccountUsers diff --git a/cmd/account/vpc-endpoints/vpc-endpoints.go b/cmd/account/vpc-endpoints/vpc-endpoints.go index 6d80e7314..e6c6c126a 100755 --- a/cmd/account/vpc-endpoints/vpc-endpoints.go +++ b/cmd/account/vpc-endpoints/vpc-endpoints.go @@ -27,6 +27,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -82,13 +88,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'vpc_endpoint_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -126,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -210,12 +210,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -290,12 +284,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -343,10 +331,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service VpcEndpoints diff --git a/cmd/account/workspace-assignment/workspace-assignment.go b/cmd/account/workspace-assignment/workspace-assignment.go index 7780d90f4..b965d31ad 100755 --- a/cmd/account/workspace-assignment/workspace-assignment.go +++ b/cmd/account/workspace-assignment/workspace-assignment.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -66,7 +72,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -103,12 +109,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
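The create commands above (vpc-endpoints here, and workspaces further below) share an argument-validation idiom: when --json is passed, the request body supplies the required field and no positional arguments are accepted; otherwise exactly one positional argument is required. Reproduced in isolation from the vpc-endpoints hunk above:

cmd.Args = func(cmd *cobra.Command, args []string) error {
	if cmd.Flags().Changed("json") {
		// With --json, the name comes from the request body, so reject
		// positional arguments with a targeted hint.
		if err := root.ExactArgs(0)(cmd, args); err != nil {
			return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'vpc_endpoint_name' in your JSON input")
		}
		return nil
	}
	check := root.ExactArgs(1)
	return check(cmd, args)
}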
@@ -137,7 +137,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -170,12 +170,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -205,7 +199,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -219,11 +213,8 @@ func newList() *cobra.Command { return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) } - response, err := a.WorkspaceAssignment.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := a.WorkspaceAssignment.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -238,12 +229,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -276,7 +261,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -302,11 +287,11 @@ func newUpdate() *cobra.Command { return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[1]) } - err = a.WorkspaceAssignment.Update(ctx, updateReq) + response, err := a.WorkspaceAssignment.Update(ctx, updateReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -321,10 +306,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service WorkspaceAssignment diff --git a/cmd/account/workspaces/workspaces.go b/cmd/account/workspaces/workspaces.go index 500a7b771..1ec6230b2 100755 --- a/cmd/account/workspaces/workspaces.go +++ b/cmd/account/workspaces/workspaces.go @@ -35,6 +35,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -110,13 +117,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'workspace_name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -166,12 +173,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -252,12 +253,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -344,12 +339,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -396,12 +385,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -429,7 +412,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialsId, "credentials-id", updateReq.CredentialsId, `ID of the workspace's credential configuration object.`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().StringVar(&updateReq.ManagedServicesCustomerManagedKeyId, "managed-services-customer-managed-key-id", updateReq.ManagedServicesCustomerManagedKeyId, `The ID of the workspace's managed services encryption key configuration object.`) - cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, `The ID of the network connectivity configuration object, which is the parent resource of this private endpoint rule object.`) + cmd.Flags().StringVar(&updateReq.NetworkConnectivityConfigId, "network-connectivity-config-id", updateReq.NetworkConnectivityConfigId, ``) cmd.Flags().StringVar(&updateReq.NetworkId, "network-id", updateReq.NetworkId, `The ID of the workspace's network configuration object.`) cmd.Flags().StringVar(&updateReq.StorageConfigurationId, "storage-configuration-id", updateReq.StorageConfigurationId, `The ID of the workspace's storage configuration object.`) cmd.Flags().StringVar(&updateReq.StorageCustomerManagedKeyId, "storage-customer-managed-key-id", updateReq.StorageCustomerManagedKeyId, `The ID of the key configuration object for workspace storage.`) @@ -464,7 +447,12 @@ func newUpdate() *cobra.Command { workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. + custom tags, the update would not be applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached, you can + only switch to another one. After calling the PATCH operation to update the workspace configuration, make repeated GET requests with the workspace ID and check the workspace @@ -476,25 +464,22 @@ func newUpdate() *cobra.Command { ### Update a running workspace You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: - - Credential configuration ID - - - Network configuration ID. Used only if you already use a customer-managed - VPC. 
You cannot convert a running workspace from a Databricks-managed VPC to a - customer-managed VPC. You can use a network configuration update in this API - for a failed or running workspace to add support for PrivateLink, although you - also need to add a private access settings object. - - - Key configuration ID for managed services (control plane storage, such as - notebook source and Databricks SQL queries). Databricks does not directly - encrypt the data with the customer-managed key (CMK). Databricks uses both the - CMK and the Databricks managed key (DMK) that is unique to your workspace to - encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your - workspace's managed services persisted data. If the workspace does not already - have a CMK for managed services, adding this ID enables managed services - encryption for new or updated data. Existing managed services data that - existed before adding the key remains not encrypted with the DEK until it is - modified. If the workspace already has customer-managed keys for managed - services, this request rotates (changes) the CMK keys and the DEK is + - Credential configuration ID - Network configuration ID. Used only if you + already use a customer-managed VPC. You cannot convert a running workspace + from a Databricks-managed VPC to a customer-managed VPC. You can use a network + configuration update in this API for a failed or running workspace to add + support for PrivateLink, although you also need to add a private access + settings object. - Key configuration ID for managed services (control plane + storage, such as notebook source and Databricks SQL queries). Databricks does + not directly encrypt the data with the customer-managed key (CMK). Databricks + uses both the CMK and the Databricks managed key (DMK) that is unique to your + workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to + encrypt your workspace's managed services persisted data. If the workspace + does not already have a CMK for managed services, adding this ID enables + managed services encryption for new or updated data. Existing managed services + data that existed before adding the key remains not encrypted with the DEK + until it is modified. If the workspace already has customer-managed keys for + managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. - Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key @@ -503,7 +488,12 @@ func newUpdate() *cobra.Command { upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - Custom tags. Given you provide an empty - custom tags, the update would not be applied. + custom tags, the update would not be applied. - Network connectivity + configuration ID to add serverless stable IP support. You can add or update + the network connectivity configuration ID to ensure the workspace uses the + same set of stable IP CIDR blocks to access your resources. You cannot remove + a network connectivity configuration from the workspace once attached, you can + only switch to another one. 
**Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data @@ -523,11 +513,9 @@ func newUpdate() *cobra.Command { This results in a total of up to 40 minutes in which you cannot create clusters. If you create or use clusters before this time interval elapses, clusters do not launch successfully, fail, or could cause other unexpected - behavior. - - * For workspaces with a customer-managed VPC, the workspace status stays at - status RUNNING and the VPC change happens immediately. A change to the - storage customer-managed key configuration ID might take a few minutes to + behavior. * For workspaces with a customer-managed VPC, the workspace status + stays at status RUNNING and the VPC change happens immediately. A change to + the storage customer-managed key configuration ID might take a few minutes to update, so continue to check the workspace until you observe that it has been updated. If the update fails, the workspace might revert silently to its original configuration. After the workspace has been updated, you cannot use @@ -621,10 +609,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Workspaces diff --git a/cmd/api/api.go b/cmd/api/api.go index 11a5e3e36..03460f717 100644 --- a/cmd/api/api.go +++ b/cmd/api/api.go @@ -5,6 +5,7 @@ import ( "net/http" "strings" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/client" @@ -35,7 +36,7 @@ func makeCommand(method string) *cobra.Command { command := &cobra.Command{ Use: strings.ToLower(method), - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: fmt.Sprintf("Perform %s request", method), RunE: func(cmd *cobra.Command, args []string) error { var path = args[0] diff --git a/cmd/auth/auth.go b/cmd/auth/auth.go index e0c7c7c5b..79e1063b1 100644 --- a/cmd/auth/auth.go +++ b/cmd/auth/auth.go @@ -12,6 +12,13 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "auth", Short: "Authentication related commands", + Long: `Authentication related commands. For more information regarding how +authentication for the Databricks CLI and SDKs work please refer to the documentation +linked below. + +AWS: https://docs.databricks.com/dev-tools/auth/index.html +Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth +GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`, } var perisistentAuth auth.PersistentAuth @@ -22,14 +29,13 @@ func New() *cobra.Command { cmd.AddCommand(newLoginCommand(&perisistentAuth)) cmd.AddCommand(newProfilesCommand()) cmd.AddCommand(newTokenCommand(&perisistentAuth)) + cmd.AddCommand(newDescribeCommand()) return cmd } func promptForHost(ctx context.Context) (string, error) { prompt := cmdio.Prompt(ctx) - prompt.Label = "Databricks Host" - prompt.Default = "https://" - prompt.AllowEdit = true + prompt.Label = "Databricks Host (e.g. https://.cloud.databricks.com)" // Validate? 
host, err := prompt.Run() if err != nil { diff --git a/cmd/auth/describe.go b/cmd/auth/describe.go new file mode 100644 index 000000000..3a6e3d5d7 --- /dev/null +++ b/cmd/auth/describe.go @@ -0,0 +1,192 @@ +package auth + +import ( + "context" + "encoding/json" + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/config" + "github.com/spf13/cobra" +) + +var authTemplate = `{{"Host:" | bold}} {{.Status.Details.Host}} +{{- if .Status.AccountID}} +{{"Account ID:" | bold}} {{.Status.AccountID}} +{{- end}} +{{- if .Status.Username}} +{{"User:" | bold}} {{.Status.Username}} +{{- end}} +{{"Authenticated with:" | bold}} {{.Status.Details.AuthType}} +----- +` + configurationTemplate + +var errorTemplate = `Unable to authenticate: {{.Status.Error}} +----- +` + configurationTemplate + +const configurationTemplate = `Current configuration: + {{- $details := .Status.Details}} + {{- range $a := .ConfigAttributes}} + {{- $k := $a.Name}} + {{- if index $details.Configuration $k}} + {{- $v := index $details.Configuration $k}} + {{if $v.AuthTypeMismatch}}~{{else}}✓{{end}} {{$k | bold}}: {{$v.Value}} + {{- if not (eq $v.Source.String "dynamic configuration")}} + {{- " (from" | italic}} {{$v.Source.String | italic}} + {{- if $v.AuthTypeMismatch}}, {{ "not used for auth type " | red | italic }}{{$details.AuthType | red | italic}}{{end}}) + {{- end}} + {{- end}} + {{- end}} +` + +func newDescribeCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "describe", + Short: "Describes the credentials and the source of those credentials, being used by the CLI to authenticate", + } + + var showSensitive bool + cmd.Flags().BoolVar(&showSensitive, "sensitive", false, "Include sensitive fields like passwords and tokens in the output") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + var status *authStatus + var err error + status, err = getAuthStatus(cmd, args, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + isAccount, err := root.MustAnyClient(cmd, args) + return root.ConfigUsed(cmd.Context()), isAccount, err + }) + + if err != nil { + return err + } + + if status.Error != nil { + return render(ctx, cmd, status, errorTemplate) + } + + return render(ctx, cmd, status, authTemplate) + } + + return cmd +} + +type tryAuth func(cmd *cobra.Command, args []string) (*config.Config, bool, error) + +func getAuthStatus(cmd *cobra.Command, args []string, showSensitive bool, fn tryAuth) (*authStatus, error) { + cfg, isAccount, err := fn(cmd, args) + ctx := cmd.Context() + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + if isAccount { + a := root.AccountClient(ctx) + + // Doing a simple API call to check if the auth is valid + _, err := a.Workspaces.List(ctx) + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + status := authStatus{ + Status: "success", + Details: getAuthDetails(cmd, a.Config, showSensitive), + AccountID: a.Config.AccountID, + Username: a.Config.Username, + } + + return &status, nil + } + + w := root.WorkspaceClient(ctx) + me, err := w.CurrentUser.Me(ctx) + if err != nil { + return &authStatus{ + Status: "error", + Error: err, + Details: getAuthDetails(cmd, cfg, showSensitive), + }, nil + } + + status := authStatus{ + Status: "success", + 
Details: getAuthDetails(cmd, w.Config, showSensitive), + Username: me.UserName, + } + + return &status, nil +} + +func render(ctx context.Context, cmd *cobra.Command, status *authStatus, template string) error { + switch root.OutputType(cmd) { + case flags.OutputText: + return cmdio.RenderWithTemplate(ctx, map[string]any{ + "Status": status, + "ConfigAttributes": config.ConfigAttributes, + }, "", template) + case flags.OutputJSON: + buf, err := json.MarshalIndent(status, "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) + } + + return nil +} + +type authStatus struct { + Status string `json:"status"` + Error error `json:"error,omitempty"` + Username string `json:"username,omitempty"` + AccountID string `json:"account_id,omitempty"` + Details config.AuthDetails `json:"details"` +} + +func getAuthDetails(cmd *cobra.Command, cfg *config.Config, showSensitive bool) config.AuthDetails { + var opts []config.AuthDetailsOptions + if showSensitive { + opts = append(opts, config.ShowSensitive) + } + details := cfg.GetAuthDetails(opts...) + + for k, v := range details.Configuration { + if k == "profile" && cmd.Flag("profile").Changed { + v.Source = config.Source{Type: config.SourceType("flag"), Name: "--profile"} + } + + if k == "host" && cmd.Flag("host").Changed { + v.Source = config.Source{Type: config.SourceType("flag"), Name: "--host"} + } + } + + // If profile is not set explicitly, default to "default" + if _, ok := details.Configuration["profile"]; !ok { + profile := cfg.Profile + if profile == "" { + profile = "default" + } + details.Configuration["profile"] = &config.AttrConfig{Value: profile, Source: config.Source{Type: config.SourceDynamicConfig}} + } + + // Unset source for databricks_cli_path because it can't be overridden anyway + if v, ok := details.Configuration["databricks_cli_path"]; ok { + v.Source = config.Source{Type: config.SourceDynamicConfig} + } + + return details +} diff --git a/cmd/auth/describe_test.go b/cmd/auth/describe_test.go new file mode 100644 index 000000000..d0260abc7 --- /dev/null +++ b/cmd/auth/describe_test.go @@ -0,0 +1,217 @@ +package auth + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/databricks-sdk-go/config" + "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/spf13/cobra" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +func TestGetWorkspaceAuthStatus(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + currentUserApi := m.GetMockCurrentUserAPI() + currentUserApi.EXPECT().Me(mock.Anything).Return(&iam.User{ + UserName: "test-user", + }, nil) + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + 
"token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, nil + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "success", status.Status) + require.Equal(t, "test-user", status.Username) + require.Equal(t, "https://test.com", status.Details.Host) + require.Equal(t, "azure-cli", status.Details.AuthType) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} + +func TestGetWorkspaceAuthStatusError(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, fmt.Errorf("auth error") + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "error", status.Status) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} + +func TestGetWorkspaceAuthStatusSensitive(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockWorkspaceClient(t) + ctx = root.SetWorkspaceClient(ctx, m.WorkspaceClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := true + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.WorkspaceClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + 
config.ConfigAttributes.Configure(cfg) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, false, fmt.Errorf("auth error") + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "error", status.Status) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "test-token", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) +} + +func TestGetAccountAuthStatus(t *testing.T) { + ctx := context.Background() + m := mocks.NewMockAccountClient(t) + ctx = root.SetAccountClient(ctx, m.AccountClient) + + cmd := &cobra.Command{} + cmd.SetContext(ctx) + + showSensitive := false + + cmd.Flags().String("host", "", "") + cmd.Flags().String("profile", "", "") + cmd.Flag("profile").Value.Set("my-profile") + cmd.Flag("profile").Changed = true + + cfg := &config.Config{ + Profile: "my-profile", + } + m.AccountClient.Config = cfg + t.Setenv("DATABRICKS_AUTH_TYPE", "azure-cli") + config.ConfigAttributes.Configure(cfg) + + wsApi := m.GetMockWorkspacesAPI() + wsApi.EXPECT().List(mock.Anything).Return(nil, nil) + + status, err := getAuthStatus(cmd, []string{}, showSensitive, func(cmd *cobra.Command, args []string) (*config.Config, bool, error) { + config.ConfigAttributes.ResolveFromStringMap(cfg, map[string]string{ + "account_id": "test-account-id", + "username": "test-user", + "host": "https://test.com", + "token": "test-token", + "auth_type": "azure-cli", + }) + return cfg, true, nil + }) + require.NoError(t, err) + require.NotNil(t, status) + require.Equal(t, "success", status.Status) + + require.Equal(t, "test-user", status.Username) + require.Equal(t, "https://test.com", status.Details.Host) + require.Equal(t, "azure-cli", status.Details.AuthType) + require.Equal(t, "test-account-id", status.AccountID) + + require.Equal(t, "azure-cli", status.Details.Configuration["auth_type"].Value) + require.Equal(t, "DATABRICKS_AUTH_TYPE environment variable", status.Details.Configuration["auth_type"].Source.String()) + require.False(t, status.Details.Configuration["auth_type"].AuthTypeMismatch) + + require.Equal(t, "********", status.Details.Configuration["token"].Value) + require.Equal(t, "dynamic configuration", status.Details.Configuration["token"].Source.String()) + require.True(t, status.Details.Configuration["token"].AuthTypeMismatch) + + require.Equal(t, "my-profile", status.Details.Configuration["profile"].Value) + require.Equal(t, "--profile flag", status.Details.Configuration["profile"].Source.String()) + require.False(t, status.Details.Configuration["profile"].AuthTypeMismatch) +} diff --git a/cmd/auth/env.go b/cmd/auth/env.go index 04aef36a8..e72d15399 100644 --- a/cmd/auth/env.go +++ b/cmd/auth/env.go @@ -10,7 +10,7 @@ import ( "net/url" "strings" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" 
"gopkg.in/ini.v1" @@ -70,7 +70,7 @@ func resolveSection(cfg *config.Config, iniFile *config.File) (*ini.Section, err } func loadFromDatabricksCfg(ctx context.Context, cfg *config.Config) error { - iniFile, err := databrickscfg.Get(ctx) + iniFile, err := profile.DefaultProfiler.Get(ctx) if errors.Is(err, fs.ErrNotExist) { // it's fine not to have ~/.databrickscfg return nil diff --git a/cmd/auth/login.go b/cmd/auth/login.go index b0bc7a853..11cba8e5f 100644 --- a/cmd/auth/login.go +++ b/cmd/auth/login.go @@ -4,12 +4,14 @@ import ( "context" "errors" "fmt" + "runtime" "time" "github.com/databricks/cli/libs/auth" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" @@ -30,16 +32,61 @@ func configureHost(ctx context.Context, persistentAuth *auth.PersistentAuth, arg } const minimalDbConnectVersion = "13.1" +const defaultTimeout = 1 * time.Hour func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { + defaultConfigPath := "~/.databrickscfg" + if runtime.GOOS == "windows" { + defaultConfigPath = "%USERPROFILE%\\.databrickscfg" + } cmd := &cobra.Command{ Use: "login [HOST]", - Short: "Authenticate this machine", + Short: "Log into a Databricks workspace or account", + Long: fmt.Sprintf(`Log into a Databricks workspace or account. +This command logs you into the Databricks workspace or account and saves +the authentication configuration in a profile (in %s by default). + +This profile can then be used to authenticate other Databricks CLI commands by +specifying the --profile flag. This profile can also be used to authenticate +other Databricks tooling that supports the Databricks Unified Authentication +Specification. This includes the Databricks Go, Python, and Java SDKs. For more information, +you can refer to the documentation linked below. + AWS: https://docs.databricks.com/dev-tools/auth/index.html + Azure: https://learn.microsoft.com/azure/databricks/dev-tools/auth + GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html + + +This command requires a Databricks Host URL (using --host or as a positional argument +or implicitly inferred from the specified profile name) +and a profile name (using --profile) to be specified. If you don't specify these +values, you'll be prompted for values at runtime. + +While this command always logs you into the specified host, the runtime behaviour +depends on the existing profiles you have set in your configuration file +(at %s by default). + +1. If a profile with the specified name exists and specifies a host, you'll + be logged into the host specified by the profile. The profile will be updated + to use "databricks-cli" as the auth type if that was not the case before. + +2. If a profile with the specified name exists but does not specify a host, + you'll be prompted to specify a host. The profile will be updated to use the + specified host. The auth type will be updated to "databricks-cli" if that was + not the case before. + +3. If a profile with the specified name exists and specifies a host, but you + specify a host using --host (or as the [HOST] positional arg), the profile will + be updated to use the newly specified host. The auth type will be updated to + "databricks-cli" if that was not the case before. + +4. 
If a profile with the specified name does not exist, a new profile will be + created with the specified host. The auth type will be set to "databricks-cli". +`, defaultConfigPath, defaultConfigPath), } var loginTimeout time.Duration var configureCluster bool - cmd.Flags().DurationVar(&loginTimeout, "timeout", auth.DefaultTimeout, + cmd.Flags().DurationVar(&loginTimeout, "timeout", defaultTimeout, "Timeout for completing login challenge in the browser") cmd.Flags().BoolVar(&configureCluster, "configure-cluster", false, "Prompts to configure cluster") @@ -63,7 +110,7 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { profileName = profile } - err := setHost(ctx, profileName, persistentAuth, args) + err := setHostAndAccountId(ctx, profileName, persistentAuth, args) if err != nil { return err } @@ -72,17 +119,10 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { // We need the config without the profile before it's used to initialise new workspace client below. // Otherwise it will complain about non existing profile because it was not yet saved. cfg := config.Config{ - Host: persistentAuth.Host, - AuthType: "databricks-cli", + Host: persistentAuth.Host, + AccountID: persistentAuth.AccountID, + AuthType: "databricks-cli", } - if cfg.IsAccountClient() && persistentAuth.AccountID == "" { - accountId, err := promptForAccountID(ctx) - if err != nil { - return err - } - persistentAuth.AccountID = accountId - } - cfg.AccountID = persistentAuth.AccountID ctx, cancel := context.WithTimeout(ctx, loginTimeout) defer cancel() @@ -127,15 +167,15 @@ func newLoginCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { return cmd } -func setHost(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { +func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error { + profiler := profile.GetProfiler(ctx) // If the chosen profile has a hostname and the user hasn't specified a host, infer the host from the profile. - _, profiles, err := databrickscfg.LoadProfiles(ctx, func(p databrickscfg.Profile) bool { - return p.Name == profileName - }) + profiles, err := profiler.LoadProfiles(ctx, profile.WithName(profileName)) // Tolerate ErrNoConfiguration here, as we will write out a configuration as part of the login flow. 
- if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && !errors.Is(err, profile.ErrNoConfiguration) { return err } + if persistentAuth.Host == "" { if len(profiles) > 0 && profiles[0].Host != "" { persistentAuth.Host = profiles[0].Host @@ -143,5 +183,17 @@ func setHost(ctx context.Context, profileName string, persistentAuth *auth.Persi configureHost(ctx, persistentAuth, args, 0) } } + isAccountClient := (&config.Config{Host: persistentAuth.Host}).IsAccountClient() + if isAccountClient && persistentAuth.AccountID == "" { + if len(profiles) > 0 && profiles[0].AccountID != "" { + persistentAuth.AccountID = profiles[0].AccountID + } else { + accountId, err := promptForAccountID(ctx) + if err != nil { + return err + } + persistentAuth.AccountID = accountId + } + } return nil } diff --git a/cmd/auth/login_test.go b/cmd/auth/login_test.go index 9b834bd0a..ce3ca5ae5 100644 --- a/cmd/auth/login_test.go +++ b/cmd/auth/login_test.go @@ -12,6 +12,6 @@ import ( func TestSetHostDoesNotFailWithNoDatabrickscfg(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./imaginary-file/databrickscfg") - err := setHost(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) + err := setHostAndAccountId(ctx, "foo", &auth.PersistentAuth{Host: "test"}, []string{}) assert.NoError(t, err) } diff --git a/cmd/auth/profiles.go b/cmd/auth/profiles.go index 51ae9b185..2fc8a314b 100644 --- a/cmd/auth/profiles.go +++ b/cmd/auth/profiles.go @@ -2,13 +2,15 @@ package auth import ( "context" + "errors" "fmt" - "net/http" - "os" + "io/fs" "sync" + "time" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" + "github.com/databricks/cli/libs/log" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/spf13/cobra" @@ -28,9 +30,12 @@ func (c *profileMetadata) IsEmpty() bool { return c.Host == "" && c.AccountID == "" } -func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { - // TODO: disable config loaders other than configfile - cfg := &config.Config{Profile: c.Name} +func (c *profileMetadata) Load(ctx context.Context, configFilePath string, skipValidate bool) { + cfg := &config.Config{ + Loaders: []config.Loader{config.ConfigFile}, + ConfigFile: configFilePath, + Profile: c.Name, + } _ = cfg.EnsureResolved() if cfg.IsAws() { c.Cloud = "aws" @@ -41,12 +46,7 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { } if skipValidate { - err := cfg.Authenticate(&http.Request{ - Header: make(http.Header), - }) - if err != nil { - return - } + c.Host = cfg.CanonicalHostName() c.AuthType = cfg.AuthType return } @@ -57,6 +57,7 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { return } _, err = a.Workspaces.List(ctx) + c.Host = cfg.Host c.AuthType = cfg.AuthType if err != nil { return @@ -68,14 +69,13 @@ func (c *profileMetadata) Load(ctx context.Context, skipValidate bool) { return } _, err = w.CurrentUser.Me(ctx) + c.Host = cfg.Host c.AuthType = cfg.AuthType if err != nil { return } c.Valid = true } - // set host again, this time normalized - c.Host = cfg.Host } func newProfilesCommand() *cobra.Command { @@ -95,8 +95,8 @@ func newProfilesCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { var profiles []*profileMetadata - iniFile, err := databrickscfg.Get(cmd.Context()) - if os.IsNotExist(err) { + iniFile, err := 
profile.DefaultProfiler.Get(cmd.Context()) + if errors.Is(err, fs.ErrNotExist) { // return empty list for non-configured machines iniFile = &config.File{ File: &ini.File{}, @@ -117,8 +117,10 @@ func newProfilesCommand() *cobra.Command { } wg.Add(1) go func() { - // load more information about profile - profile.Load(cmd.Context(), skipValidate) + ctx := cmd.Context() + t := time.Now() + profile.Load(ctx, iniFile.Path(), skipValidate) + log.Debugf(ctx, "Profile %q took %s to load", profile.Name, time.Since(t)) wg.Done() }() profiles = append(profiles, profile) diff --git a/cmd/auth/profiles_test.go b/cmd/auth/profiles_test.go new file mode 100644 index 000000000..91ff4d049 --- /dev/null +++ b/cmd/auth/profiles_test.go @@ -0,0 +1,46 @@ +package auth + +import ( + "context" + "path/filepath" + "runtime" + "testing" + + "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/databricks-sdk-go/config" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestProfiles(t *testing.T) { + ctx := context.Background() + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + + // Create a config file with a profile + err := databrickscfg.SaveToProfile(ctx, &config.Config{ + ConfigFile: configFile, + Profile: "profile1", + Host: "abc.cloud.databricks.com", + Token: "token1", + AuthType: "pat", + }) + require.NoError(t, err) + + // Let the environment think we're using another profile + t.Setenv("DATABRICKS_HOST", "https://def.cloud.databricks.com") + t.Setenv("HOME", dir) + if runtime.GOOS == "windows" { + t.Setenv("USERPROFILE", dir) + } + + // Load the profile + profile := &profileMetadata{Name: "profile1"} + profile.Load(ctx, configFile, true) + + // Check the profile + assert.Equal(t, "profile1", profile.Name) + assert.Equal(t, "https://abc.cloud.databricks.com", profile.Host) + assert.Equal(t, "aws", profile.Cloud) + assert.Equal(t, "pat", profile.AuthType) +} diff --git a/cmd/auth/token.go b/cmd/auth/token.go index d763b9564..3f9af43fa 100644 --- a/cmd/auth/token.go +++ b/cmd/auth/token.go @@ -4,12 +4,44 @@ import ( "context" "encoding/json" "errors" + "fmt" + "os" + "strings" "time" "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go/httpclient" "github.com/spf13/cobra" ) +type tokenErrorResponse struct { + Error string `json:"error"` + ErrorDescription string `json:"error_description"` +} + +func buildLoginCommand(profile string, persistentAuth *auth.PersistentAuth) string { + executable := os.Args[0] + cmd := []string{ + executable, + "auth", + "login", + } + if profile != "" { + cmd = append(cmd, "--profile", profile) + } else { + cmd = append(cmd, "--host", persistentAuth.Host) + if persistentAuth.AccountID != "" { + cmd = append(cmd, "--account-id", persistentAuth.AccountID) + } + } + return strings.Join(cmd, " ") +} + +func helpfulError(profile string, persistentAuth *auth.PersistentAuth) string { + loginMsg := buildLoginCommand(profile, persistentAuth) + return fmt.Sprintf("Try logging in again with `%s` before retrying. 
If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new", loginMsg) +} + func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { cmd := &cobra.Command{ Use: "token [HOST]", @@ -17,7 +49,7 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { } var tokenTimeout time.Duration - cmd.Flags().DurationVar(&tokenTimeout, "timeout", auth.DefaultTimeout, + cmd.Flags().DurationVar(&tokenTimeout, "timeout", defaultTimeout, "Timeout for acquiring a token.") cmd.RunE = func(cmd *cobra.Command, args []string) error { @@ -29,11 +61,11 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { profileName = profileFlag.Value.String() // If a profile is provided we read the host from the .databrickscfg file if profileName != "" && len(args) > 0 { - return errors.New("providing both a profile and a host parameters is not supported") + return errors.New("providing both a profile and host is not supported") } } - err := setHost(ctx, profileName, persistentAuth, args) + err := setHostAndAccountId(ctx, profileName, persistentAuth, args) if err != nil { return err } @@ -42,8 +74,21 @@ func newTokenCommand(persistentAuth *auth.PersistentAuth) *cobra.Command { ctx, cancel := context.WithTimeout(ctx, tokenTimeout) defer cancel() t, err := persistentAuth.Load(ctx) - if err != nil { - return err + var httpErr *httpclient.HttpError + if errors.As(err, &httpErr) { + helpMsg := helpfulError(profileName, persistentAuth) + t := &tokenErrorResponse{} + err = json.Unmarshal([]byte(httpErr.Message), t) + if err != nil { + return fmt.Errorf("unexpected parsing token response: %w. %s", err, helpMsg) + } + if t.ErrorDescription == "Refresh token is invalid" { + return fmt.Errorf("a new access token could not be retrieved because the refresh token is invalid. To reauthenticate, run `%s`", buildLoginCommand(profileName, persistentAuth)) + } else { + return fmt.Errorf("unexpected error refreshing token: %s. %s", t.ErrorDescription, helpMsg) + } + } else if err != nil { + return fmt.Errorf("unexpected error refreshing token: %w. 
%s", err, helpfulError(profileName, persistentAuth)) } raw, err := json.MarshalIndent(t, "", " ") if err != nil { diff --git a/cmd/auth/token_test.go b/cmd/auth/token_test.go new file mode 100644 index 000000000..df98cc151 --- /dev/null +++ b/cmd/auth/token_test.go @@ -0,0 +1,168 @@ +package auth_test + +import ( + "bytes" + "context" + "encoding/json" + "testing" + "time" + + "github.com/databricks/cli/cmd" + "github.com/databricks/cli/libs/auth" + "github.com/databricks/cli/libs/auth/cache" + "github.com/databricks/cli/libs/databrickscfg/profile" + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/spf13/cobra" + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +var refreshFailureTokenResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: map[string]string{ + "error": "invalid_request", + "error_description": "Refresh token is invalid", + }, +} + +var refreshFailureInvalidResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: "Not json", +} + +var refreshFailureOtherError = fixtures.HTTPFixture{ + MatchAny: true, + Status: 401, + Response: map[string]string{ + "error": "other_error", + "error_description": "Databricks is down", + }, +} + +var refreshSuccessTokenResponse = fixtures.HTTPFixture{ + MatchAny: true, + Status: 200, + Response: map[string]string{ + "access_token": "new-access-token", + "token_type": "Bearer", + "expires_in": "3600", + }, +} + +func validateToken(t *testing.T, resp string) { + res := map[string]string{} + err := json.Unmarshal([]byte(resp), &res) + assert.NoError(t, err) + assert.Equal(t, "new-access-token", res["access_token"]) + assert.Equal(t, "Bearer", res["token_type"]) +} + +func getContextForTest(f fixtures.HTTPFixture) context.Context { + profiler := profile.InMemoryProfiler{ + Profiles: profile.Profiles{ + { + Name: "expired", + Host: "https://accounts.cloud.databricks.com", + AccountID: "expired", + }, + { + Name: "active", + Host: "https://accounts.cloud.databricks.com", + AccountID: "active", + }, + }, + } + tokenCache := &cache.InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{ + "https://accounts.cloud.databricks.com/oidc/accounts/expired": { + RefreshToken: "expired", + }, + "https://accounts.cloud.databricks.com/oidc/accounts/active": { + RefreshToken: "active", + Expiry: time.Now().Add(1 * time.Hour), // Hopefully unit tests don't take an hour to run + }, + }, + } + client := httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: fixtures.SliceTransport{f}, + }) + ctx := profile.WithProfiler(context.Background(), profiler) + ctx = cache.WithTokenCache(ctx, tokenCache) + ctx = auth.WithApiClientForOAuth(ctx, client) + return ctx +} + +func getCobraCmdForTest(f fixtures.HTTPFixture) (*cobra.Command, *bytes.Buffer) { + ctx := getContextForTest(f) + c := cmd.New(ctx) + output := &bytes.Buffer{} + c.SetOut(output) + return c, output +} + +func TestTokenCmdWithProfilePrintsHelpfulLoginMessageOnRefreshFailure(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "expired"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "a new access token could not be retrieved because the refresh token is invalid. 
To reauthenticate, run ") + assert.ErrorContains(t, err, "auth login --profile expired") +} + +func TestTokenCmdWithHostPrintsHelpfulLoginMessageOnRefreshFailure(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--host", "https://accounts.cloud.databricks.com", "--account-id", "expired"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "a new access token could not be retrieved because the refresh token is invalid. To reauthenticate, run ") + assert.ErrorContains(t, err, "auth login --host https://accounts.cloud.databricks.com --account-id expired") +} + +func TestTokenCmdInvalidResponse(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureInvalidResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "unexpected parsing token response: invalid character 'N' looking for beginning of value. Try logging in again with ") + assert.ErrorContains(t, err, "auth login --profile active` before retrying. If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new") +} + +func TestTokenCmdOtherErrorResponse(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshFailureOtherError) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + assert.Empty(t, out) + assert.ErrorContains(t, err, "unexpected error refreshing token: Databricks is down. Try logging in again with ") + assert.ErrorContains(t, err, "auth login --profile active` before retrying. If this fails, please report this issue to the Databricks CLI maintainers at https://github.com/databricks/cli/issues/new") +} + +func TestTokenCmdWithProfileSuccess(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshSuccessTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--profile", "active"}) + err := cmd.Execute() + + out := output.String() + validateToken(t, out) + assert.NoError(t, err) +} + +func TestTokenCmdWithHostSuccess(t *testing.T) { + cmd, output := getCobraCmdForTest(refreshSuccessTokenResponse) + cmd.SetArgs([]string{"auth", "token", "--host", "https://accounts.cloud.databricks.com", "--account-id", "expired"}) + err := cmd.Execute() + + out := output.String() + validateToken(t, out) + assert.NoError(t, err) +} diff --git a/cmd/bundle/bundle.go b/cmd/bundle/bundle.go index a82311d83..0880c9c44 100644 --- a/cmd/bundle/bundle.go +++ b/cmd/bundle/bundle.go @@ -1,6 +1,7 @@ package bundle import ( + "github.com/databricks/cli/cmd/bundle/deployment" "github.com/spf13/cobra" ) @@ -8,7 +9,7 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "bundle", Short: "Databricks Asset Bundles let you express data/AI/analytics projects as code.", - Long: "Databricks Asset Bundles let you express data/AI/analytics projects as code.\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles", + Long: "Databricks Asset Bundles let you express data/AI/analytics projects as code.\n\nOnline documentation: https://docs.databricks.com/en/dev-tools/bundles/index.html", GroupID: "development", } @@ -24,5 +25,7 @@ func New() *cobra.Command { cmd.AddCommand(newInitCommand()) cmd.AddCommand(newSummaryCommand()) cmd.AddCommand(newGenerateCommand()) + cmd.AddCommand(newDebugCommand()) + cmd.AddCommand(deployment.NewDeploymentCommand()) return cmd } diff --git 
a/cmd/bundle/debug.go b/cmd/bundle/debug.go new file mode 100644 index 000000000..42d16eab5 --- /dev/null +++ b/cmd/bundle/debug.go @@ -0,0 +1,18 @@ +package bundle + +import ( + "github.com/databricks/cli/cmd/bundle/debug" + "github.com/spf13/cobra" +) + +func newDebugCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "debug", + Short: "Debug information about bundles", + Long: "Debug information about bundles", + // This command group is currently intended for the Databricks VSCode extension only + Hidden: true, + } + cmd.AddCommand(debug.NewTerraformCommand()) + return cmd +} diff --git a/cmd/bundle/debug/terraform.go b/cmd/bundle/debug/terraform.go new file mode 100644 index 000000000..843ecac4e --- /dev/null +++ b/cmd/bundle/debug/terraform.go @@ -0,0 +1,78 @@ +package debug + +import ( + "encoding/json" + "fmt" + + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/spf13/cobra" +) + +type Dependencies struct { + Terraform *terraform.TerraformMetadata `json:"terraform"` +} + +func NewTerraformCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "terraform", + Short: "Prints Terraform dependencies required for the bundle commands", + Args: root.NoArgs, + Annotations: map[string]string{ + "template": `Terraform version: {{.Version}} +Terraform URL: https://releases.hashicorp.com/terraform/{{.Version}} + +Databricks Terraform Provider version: {{.ProviderVersion}} +Databricks Terraform Provider URL: https://github.com/databricks/terraform-provider-databricks/releases/tag/v{{.ProviderVersion}} + +Databricks CLI downloads its Terraform dependencies automatically. + +If you run the CLI in an air-gapped environment, you can download the dependencies manually and set these environment variables: + + DATABRICKS_TF_VERSION={{.Version}} + DATABRICKS_TF_EXEC_PATH=/path/to/terraform/binary + DATABRICKS_TF_PROVIDER_VERSION={{.ProviderVersion}} + DATABRICKS_TF_CLI_CONFIG_FILE=/path/to/terraform/cli/config.tfrc + +Here is an example *.tfrc configuration file: + + disable_checkpoint = true + provider_installation { + filesystem_mirror { + path = "/path/to/a/folder/with/databricks/terraform/provider" + } + } + +The filesystem mirror path should point to the folder with the Databricks Terraform Provider. 
The folder should have this structure: /{{.ProviderHost}}/{{.ProviderSource}}/terraform-provider-databricks_{{.ProviderVersion}}_ARCH.zip + +For more information about filesystem mirrors, see the Terraform documentation: https://developer.hashicorp.com/terraform/cli/config/config-file#filesystem_mirror +`, + }, + // This command is currently intended for the Databricks VSCode extension only + Hidden: true, + } + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + dependencies := &Dependencies{ + Terraform: terraform.NewTerraformMetadata(), + } + switch root.OutputType(cmd) { + case flags.OutputText: + cmdio.Render(cmd.Context(), dependencies.Terraform) + case flags.OutputJSON: + buf, err := json.MarshalIndent(dependencies, "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) + } + + return nil + } + + return cmd +} diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 8818bbbf4..1232c8de5 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -1,37 +1,73 @@ package bundle import ( + "context" + "fmt" + "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/spf13/cobra" ) func newDeployCommand() *cobra.Command { cmd := &cobra.Command{ - Use: "deploy", - Short: "Deploy bundle", - PreRunE: ConfigureBundleWithVariables, + Use: "deploy", + Short: "Deploy bundle", + Args: root.NoArgs, } var force bool var forceLock bool + var failOnActiveRuns bool var computeID string cmd.Flags().BoolVar(&force, "force", false, "Force-override Git branch validation.") cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + cmd.Flags().BoolVar(&failOnActiveRuns, "fail-on-active-runs", false, "Fail if there are running jobs or pipelines in the deployment.") cmd.Flags().StringVarP(&computeID, "compute-id", "c", "", "Override compute in the deployment with the given compute ID.") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) - b.Config.Bundle.Force = force - b.Config.Bundle.Lock.Force = forceLock - b.Config.Bundle.ComputeID = computeID + if !diags.HasError() { + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { + b.Config.Bundle.Force = force + b.Config.Bundle.Deployment.Lock.Force = forceLock + if cmd.Flag("compute-id").Changed { + b.Config.Bundle.ComputeID = computeID + } - return bundle.Apply(cmd.Context(), b, bundle.Seq( - phases.Initialize(), - phases.Build(), - phases.Deploy(), - )) + if cmd.Flag("fail-on-active-runs").Changed { + b.Config.Bundle.Deployment.FailOnActiveRuns = failOnActiveRuns + } + + return nil + }) + + diags = diags.Extend( + bundle.Apply(ctx, b, bundle.Seq( + phases.Initialize(), + phases.Build(), + phases.Deploy(), + )), + ) + } + + renderOpts := render.RenderOptions{RenderSummaryTable: false} + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil } return cmd diff --git a/cmd/bundle/deployment/bind.go b/cmd/bundle/deployment/bind.go new file mode 100644 index 000000000..71f441d3d --- /dev/null +++ 
b/cmd/bundle/deployment/bind.go @@ -0,0 +1,74 @@ +package deployment + +import ( + "context" + "fmt" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/terraform" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" + "github.com/spf13/cobra" +) + +func newBindCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "bind KEY RESOURCE_ID", + Short: "Bind bundle-defined resources to existing resources", + Args: root.ExactArgs(2), + } + + var autoApprove bool + var forceLock bool + cmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Automatically approve the binding") + cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) + if err != nil { + return err + } + + w := b.WorkspaceClient() + exists, err := resource.Exists(ctx, w, args[1]) + if err != nil { + return fmt.Errorf("failed to fetch the resource, err: %w", err) + } + + if !exists { + return fmt.Errorf("%s with an id '%s' is not found", resource.TerraformResourceName(), args[1]) + } + + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { + b.Config.Bundle.Deployment.Lock.Force = forceLock + return nil + }) + + diags = bundle.Apply(ctx, b, bundle.Seq( + phases.Initialize(), + phases.Bind(&terraform.BindOptions{ + AutoApprove: autoApprove, + ResourceType: resource.TerraformResourceName(), + ResourceKey: args[0], + ResourceId: args[1], + }), + )) + if err := diags.Error(); err != nil { + return fmt.Errorf("failed to bind the resource, err: %w", err) + } + + cmdio.LogString(ctx, fmt.Sprintf("Successfully bound %s with an id '%s'. 
Run 'bundle deploy' to deploy changes to your workspace", resource.TerraformResourceName(), args[1])) + return nil + } + + return cmd +} diff --git a/cmd/bundle/deployment/deployment.go b/cmd/bundle/deployment/deployment.go new file mode 100644 index 000000000..d29a8e72b --- /dev/null +++ b/cmd/bundle/deployment/deployment.go @@ -0,0 +1,17 @@ +package deployment + +import ( + "github.com/spf13/cobra" +) + +func NewDeploymentCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "deployment", + Short: "Deployment related commands", + Long: "Deployment related commands", + } + + cmd.AddCommand(newBindCommand()) + cmd.AddCommand(newUnbindCommand()) + return cmd +} diff --git a/cmd/bundle/deployment/unbind.go b/cmd/bundle/deployment/unbind.go new file mode 100644 index 000000000..9de5285a5 --- /dev/null +++ b/cmd/bundle/deployment/unbind.go @@ -0,0 +1,52 @@ +package deployment + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" + "github.com/spf13/cobra" +) + +func newUnbindCommand() *cobra.Command { + cmd := &cobra.Command{ + Use: "unbind KEY", + Short: "Unbind a bundle-defined resource from its managed remote resource", + Args: root.ExactArgs(1), + } + + var forceLock bool + cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.") + + cmd.RunE = func(cmd *cobra.Command, args []string) error { + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + + resource, err := b.Config.Resources.FindResourceByConfigKey(args[0]) + if err != nil { + return err + } + + bundle.ApplyFunc(ctx, b, func(context.Context, *bundle.Bundle) diag.Diagnostics { + b.Config.Bundle.Deployment.Lock.Force = forceLock + return nil + }) + + diags = bundle.Apply(cmd.Context(), b, bundle.Seq( + phases.Initialize(), + phases.Unbind(resource.TerraformResourceName(), args[0]), + )) + if err := diags.Error(); err != nil { + return err + } + return nil + } + + return cmd +} diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 22d998abe..cd7e63062 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -1,12 +1,16 @@ package bundle import ( + "context" "fmt" "os" "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" "golang.org/x/term" @@ -16,8 +20,7 @@ func newDestroyCommand() *cobra.Command { cmd := &cobra.Command{ Use: "destroy", Short: "Destroy deployed bundle resources", - - PreRunE: ConfigureBundleWithVariables, + Args: root.NoArgs, } var autoApprove bool @@ -27,13 +30,20 @@ cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - // If `--force-lock` is specified, force acquisition of the deployment lock. - b.Config.Bundle.Lock.Force = forceDestroy + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + // If `--force-lock` is specified, force acquisition of the deployment lock. 
+ b.Config.Bundle.Deployment.Lock.Force = forceDestroy - // If `--auto-approve`` is specified, we skip confirmation checks - b.AutoApprove = autoApprove + // If `--auto-approve`` is specified, we skip confirmation checks + b.AutoApprove = autoApprove + + return nil + }) // we require auto-approve for non tty terminals since interactive consent // is not possible @@ -50,11 +60,15 @@ func newDestroyCommand() *cobra.Command { return fmt.Errorf("please specify --auto-approve since selected logging format is json") } - return bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), phases.Build(), phases.Destroy(), )) + if err := diags.Error(); err != nil { + return err + } + return nil } return cmd diff --git a/cmd/bundle/generate.go b/cmd/bundle/generate.go index 89d7c6adc..1e3d56e43 100644 --- a/cmd/bundle/generate.go +++ b/cmd/bundle/generate.go @@ -9,10 +9,9 @@ func newGenerateCommand() *cobra.Command { var key string cmd := &cobra.Command{ - Use: "generate", - Short: "Generate bundle configuration", - Long: "Generate bundle configuration", - PreRunE: ConfigureBundleWithVariables, + Use: "generate", + Short: "Generate bundle configuration", + Long: "Generate bundle configuration", } cmd.AddCommand(generate.NewGenerateJobCommand()) diff --git a/cmd/bundle/generate/generate_test.go b/cmd/bundle/generate/generate_test.go index b29bdef28..ae3710ac8 100644 --- a/cmd/bundle/generate/generate_test.go +++ b/cmd/bundle/generate/generate_test.go @@ -10,8 +10,9 @@ import ( "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/databricks-sdk-go/experimental/mocks" + "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/databricks/databricks-sdk-go/service/workspace" "github.com/stretchr/testify/mock" @@ -23,9 +24,7 @@ func TestGeneratePipelineCommand(t *testing.T) { root := t.TempDir() b := &bundle.Bundle{ - Config: config.Root{ - Path: root, - }, + RootPath: root, } m := mocks.NewMockWorkspaceClient(t) @@ -36,6 +35,18 @@ func TestGeneratePipelineCommand(t *testing.T) { Name: "test-pipeline", Spec: &pipelines.PipelineSpec{ Name: "test-pipeline", + Clusters: []pipelines.PipelineCluster{ + { + CustomTags: map[string]string{ + "Tag1": "24X7-1234", + }, + }, + { + SparkConf: map[string]string{ + "spark.databricks.delta.preview.enabled": "true", + }, + }, + }, Libraries: []pipelines.PipelineLibrary{ {Notebook: &pipelines.NotebookLibrary{ Path: "/test/notebook", @@ -85,6 +96,11 @@ func TestGeneratePipelineCommand(t *testing.T) { pipelines: test_pipeline: name: test-pipeline + clusters: + - custom_tags: + "Tag1": "24X7-1234" + - spark_conf: + "spark.databricks.delta.preview.enabled": "true" libraries: - notebook: path: %s @@ -100,3 +116,91 @@ func TestGeneratePipelineCommand(t *testing.T) { require.NoError(t, err) require.Equal(t, "Py content", string(data)) } + +func TestGenerateJobCommand(t *testing.T) { + cmd := NewGenerateJobCommand() + + root := t.TempDir() + b := &bundle.Bundle{ + RootPath: root, + } + + m := mocks.NewMockWorkspaceClient(t) + b.SetWorkpaceClient(m.WorkspaceClient) + + jobsApi := m.GetMockJobsAPI() + jobsApi.EXPECT().Get(mock.Anything, jobs.GetJobRequest{JobId: 1234}).Return(&jobs.Job{ + Settings: &jobs.JobSettings{ + Name: "test-job", + JobClusters: []jobs.JobCluster{ + {NewCluster: compute.ClusterSpec{ + CustomTags: map[string]string{ + "Tag1": "24X7-1234", 
+ }, + }}, + {NewCluster: compute.ClusterSpec{ + SparkConf: map[string]string{ + "spark.databricks.delta.preview.enabled": "true", + }, + }}, + }, + Tasks: []jobs.Task{ + { + TaskKey: "notebook_task", + NotebookTask: &jobs.NotebookTask{ + NotebookPath: "/test/notebook", + }, + }, + }, + }, + }, nil) + + workspaceApi := m.GetMockWorkspaceAPI() + workspaceApi.EXPECT().GetStatusByPath(mock.Anything, "/test/notebook").Return(&workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeNotebook, + Language: workspace.LanguagePython, + Path: "/test/notebook", + }, nil) + + notebookContent := io.NopCloser(bytes.NewBufferString("# Databricks notebook source\nNotebook content")) + workspaceApi.EXPECT().Download(mock.Anything, "/test/notebook", mock.Anything).Return(notebookContent, nil) + + cmd.SetContext(bundle.Context(context.Background(), b)) + cmd.Flag("existing-job-id").Value.Set("1234") + + configDir := filepath.Join(root, "resources") + cmd.Flag("config-dir").Value.Set(configDir) + + srcDir := filepath.Join(root, "src") + cmd.Flag("source-dir").Value.Set(srcDir) + + var key string + cmd.Flags().StringVar(&key, "key", "test_job", "") + + err := cmd.RunE(cmd, []string{}) + require.NoError(t, err) + + data, err := os.ReadFile(filepath.Join(configDir, "test_job.yml")) + require.NoError(t, err) + + require.Equal(t, fmt.Sprintf(`resources: + jobs: + test_job: + name: test-job + job_clusters: + - new_cluster: + custom_tags: + "Tag1": "24X7-1234" + - new_cluster: + spark_conf: + "spark.databricks.delta.preview.enabled": "true" + tasks: + - task_key: notebook_task + notebook_task: + notebook_path: %s +`, filepath.Join("..", "src", "notebook.py")), string(data)) + + data, err = os.ReadFile(filepath.Join(srcDir, "notebook.py")) + require.NoError(t, err) + require.Equal(t, "# Databricks notebook source\nNotebook content", string(data)) +} diff --git a/cmd/bundle/generate/job.go b/cmd/bundle/generate/job.go index b88b2c17b..99bc61660 100644 --- a/cmd/bundle/generate/job.go +++ b/cmd/bundle/generate/job.go @@ -5,7 +5,6 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/generate" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -14,6 +13,7 @@ import ( "github.com/databricks/cli/libs/textutil" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/spf13/cobra" + "gopkg.in/yaml.v3" ) func NewGenerateJobCommand() *cobra.Command { @@ -23,9 +23,8 @@ func NewGenerateJobCommand() *cobra.Command { var force bool cmd := &cobra.Command{ - Use: "job", - Short: "Generate bundle configuration for a job", - PreRunE: root.MustConfigureBundle, + Use: "job", + Short: "Generate bundle configuration for a job", } cmd.Flags().Int64Var(&jobId, "existing-job-id", 0, `Job ID of the job to generate config for`) @@ -42,9 +41,12 @@ func NewGenerateJobCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) - w := b.WorkspaceClient() + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + w := b.WorkspaceClient() job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{JobId: jobId}) if err != nil { return err @@ -82,7 +84,13 @@ func NewGenerateJobCommand() *cobra.Command { } filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey)) - err = yamlsaver.SaveAsYAML(result, filename, force) + saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{ + // Including all JobSettings and nested fields which are 
map[string]string type + "spark_conf": yaml.DoubleQuotedStyle, + "custom_tags": yaml.DoubleQuotedStyle, + "tags": yaml.DoubleQuotedStyle, + }) + err = saver.SaveAsYAML(result, filename, force) if err != nil { return err } diff --git a/cmd/bundle/generate/pipeline.go b/cmd/bundle/generate/pipeline.go index 955db34b2..bd973fe0b 100644 --- a/cmd/bundle/generate/pipeline.go +++ b/cmd/bundle/generate/pipeline.go @@ -5,7 +5,6 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config/generate" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -14,6 +13,7 @@ import ( "github.com/databricks/cli/libs/textutil" "github.com/databricks/databricks-sdk-go/service/pipelines" "github.com/spf13/cobra" + "gopkg.in/yaml.v3" ) func NewGeneratePipelineCommand() *cobra.Command { @@ -23,9 +23,8 @@ func NewGeneratePipelineCommand() *cobra.Command { var force bool cmd := &cobra.Command{ - Use: "pipeline", - Short: "Generate bundle configuration for a pipeline", - PreRunE: root.MustConfigureBundle, + Use: "pipeline", + Short: "Generate bundle configuration for a pipeline", } cmd.Flags().StringVar(&pipelineId, "existing-pipeline-id", "", `ID of the pipeline to generate config for`) @@ -42,9 +41,12 @@ func NewGeneratePipelineCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) - w := b.WorkspaceClient() + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } + w := b.WorkspaceClient() pipeline, err := w.Pipelines.Get(ctx, pipelines.GetPipelineRequest{PipelineId: pipelineId}) if err != nil { return err @@ -82,7 +84,15 @@ func NewGeneratePipelineCommand() *cobra.Command { } filename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey)) - err = yamlsaver.SaveAsYAML(result, filename, force) + saver := yamlsaver.NewSaverWithStyle( + // Including all PipelineSpec and nested fields which are map[string]string type + map[string]yaml.Style{ + "spark_conf": yaml.DoubleQuotedStyle, + "custom_tags": yaml.DoubleQuotedStyle, + "configuration": yaml.DoubleQuotedStyle, + }, + ) + err = saver.SaveAsYAML(result, filename, force) if err != nil { return err } diff --git a/cmd/bundle/init.go b/cmd/bundle/init.go index db8250d07..c25391577 100644 --- a/cmd/bundle/init.go +++ b/cmd/bundle/init.go @@ -25,6 +25,7 @@ type nativeTemplate struct { gitUrl string description string aliases []string + hidden bool } const customTemplate = "custom..." 
@@ -34,12 +35,26 @@ var nativeTemplates = []nativeTemplate{ name: "default-python", description: "The default Python template for Notebooks / Delta Live Tables / Workflows", }, + { + name: "default-sql", + description: "The default SQL template for .sql files that run with Databricks SQL", + }, + { + name: "dbt-sql", + description: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)", + }, { name: "mlops-stacks", gitUrl: "https://github.com/databricks/mlops-stacks", description: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)", aliases: []string{"mlops-stack"}, }, + { + name: "default-pydabs", + gitUrl: "https://databricks.github.io/workflows-authoring-toolkit/pydabs-template.git", + hidden: true, + description: "The default PyDABs template", + }, { name: customTemplate, description: "Bring your own template", @@ -50,7 +65,7 @@ var nativeTemplates = []nativeTemplate{ func nativeTemplateHelpDescriptions() string { var lines []string for _, template := range nativeTemplates { - if template.name != customTemplate { + if template.name != customTemplate && !template.hidden { lines = append(lines, fmt.Sprintf("- %s: %s", template.name, template.description)) } } @@ -61,6 +76,9 @@ func nativeTemplateHelpDescriptions() string { func nativeTemplateOptions() []cmdio.Tuple { names := make([]cmdio.Tuple, 0, len(nativeTemplates)) for _, template := range nativeTemplates { + if template.hidden { + continue + } tuple := cmdio.Tuple{ Name: template.name, Id: template.description, @@ -114,7 +132,7 @@ func newInitCommand() *cobra.Command { cmd := &cobra.Command{ Use: "init [TEMPLATE_PATH]", Short: "Initialize using a bundle template", - Args: cobra.MaximumNArgs(1), + Args: root.MaximumNArgs(1), Long: fmt.Sprintf(`Initialize using a bundle template. TEMPLATE_PATH optionally specifies which template to use. It can be one of the following: @@ -191,12 +209,19 @@ See https://docs.databricks.com/en/dev-tools/bundles/templates.html for more inf if err != nil { return err } + + // start the spinner + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "Downloading the template\n" + // TODO: Add automated test that the downloaded git repo is cleaned up. // Clone the repository in the temporary directory err = git.Clone(ctx, templatePath, ref, repoDir) + close(promptSpinner) if err != nil { return err } + // Clean up downloaded repository once the template is materialized. 
defer os.RemoveAll(repoDir) return template.Materialize(ctx, configFile, filepath.Join(repoDir, templateDir), outputDir) diff --git a/cmd/bundle/init_test.go b/cmd/bundle/init_test.go index aa8991596..475b2e149 100644 --- a/cmd/bundle/init_test.go +++ b/cmd/bundle/init_test.go @@ -30,6 +30,8 @@ func TestBundleInitRepoName(t *testing.T) { func TestNativeTemplateOptions(t *testing.T) { expected := []cmdio.Tuple{ {Name: "default-python", Id: "The default Python template for Notebooks / Delta Live Tables / Workflows"}, + {Name: "default-sql", Id: "The default SQL template for .sql files that run with Databricks SQL"}, + {Name: "dbt-sql", Id: "The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks)"}, {Name: "mlops-stacks", Id: "The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)"}, {Name: "custom...", Id: "Bring your own template"}, } @@ -38,6 +40,8 @@ func TestNativeTemplateOptions(t *testing.T) { func TestNativeTemplateHelpDescriptions(t *testing.T) { expected := `- default-python: The default Python template for Notebooks / Delta Live Tables / Workflows +- default-sql: The default SQL template for .sql files that run with Databricks SQL +- dbt-sql: The dbt SQL template (databricks.com/blog/delivering-cost-effective-data-real-time-dbt-and-databricks) - mlops-stacks: The Databricks MLOps Stacks template (github.com/databricks/mlops-stacks)` assert.Equal(t, expected, nativeTemplateHelpDescriptions()) } diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go index bbb43600a..0d2b4233b 100644 --- a/cmd/bundle/launch.go +++ b/cmd/bundle/launch.go @@ -12,12 +12,10 @@ func newLaunchCommand() *cobra.Command { Use: "launch", Short: "Launches a notebook on development cluster", Long: `Reads a file and executes it on dev cluster`, - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), // We're not ready to expose this command until we specify its semantics. Hidden: true, - - PreRunE: root.MustConfigureBundle, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index c9e35aa3b..63458f85c 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/bundle/run" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" @@ -17,30 +18,51 @@ import ( func newRunCommand() *cobra.Command { cmd := &cobra.Command{ Use: "run [flags] KEY", - Short: "Run a resource (e.g. a job or a pipeline)", + Short: "Run a job or pipeline update", + Long: `Run the job or pipeline identified by KEY. - Args: cobra.MaximumNArgs(1), - PreRunE: ConfigureBundleWithVariables, +The KEY is the unique identifier of the resource to run. In addition to +customizing the run using any of the available flags, you can also specify +keyword or positional arguments as shown in these examples: + + databricks bundle run my_job -- --key1 value1 --key2 value2 + +Or: + + databricks bundle run my_job -- value1 value2 value3 + +If the specified job uses job parameters or the job has a notebook task with +parameters, the first example applies and flag names are mapped to the +parameter names. + +If the specified job does not use job parameters and the job has a Python file +task or a Python wheel task, the second example applies. 
+`, } var runOptions run.Options - runOptions.Define(cmd.Flags()) + runOptions.Define(cmd) var noWait bool + var restart bool cmd.Flags().BoolVar(&noWait, "no-wait", false, "Don't wait for the run to complete.") + cmd.Flags().BoolVar(&restart, "restart", false, "Restart the run if it is already running.") cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - b := bundle.Get(ctx) + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - err := bundle.Apply(ctx, b, bundle.Seq( + diags = bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), terraform.Interpolate(), terraform.Write(), terraform.StatePull(), terraform.Load(terraform.ErrorOnEmptyState), )) - if err != nil { + if err := diags.Error(); err != nil { return err } @@ -58,7 +80,7 @@ func newRunCommand() *cobra.Command { args = append(args, id) } - if len(args) != 1 { + if len(args) < 1 { return fmt.Errorf("expected a KEY of the resource to run") } @@ -67,7 +89,22 @@ func newRunCommand() *cobra.Command { return err } + // Parse additional positional arguments. + err = runner.ParseArgs(args[1:], &runOptions) + if err != nil { + return err + } + runOptions.NoWait = noWait + if restart { + s := cmdio.Spinner(ctx) + s <- "Cancelling all runs" + err := runner.Cancel(ctx) + close(s) + if err != nil { + return err + } + } output, err := runner.Run(ctx, &runOptions) if err != nil { return err @@ -94,24 +131,28 @@ func newRunCommand() *cobra.Command { } cmd.ValidArgsFunction = func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - if len(args) > 0 { - return nil, cobra.ShellCompDirectiveNoFileComp - } - - err := root.MustConfigureBundle(cmd, args) - if err != nil { + b, diags := root.MustConfigureBundle(cmd) + if err := diags.Error(); err != nil { cobra.CompErrorln(err.Error()) return nil, cobra.ShellCompDirectiveError } // No completion in the context of a bundle. // Source and destination paths are taken from bundle configuration. - b := bundle.GetOrNil(cmd.Context()) if b == nil { return nil, cobra.ShellCompDirectiveNoFileComp } - return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp + if len(args) == 0 { + return run.ResourceCompletions(b), cobra.ShellCompDirectiveNoFileComp + } else { + // If we know the resource to run, we can complete additional positional arguments. + runner, err := run.Find(b, args[0]) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + return runner.CompleteArgs(args[1:], toComplete) + } } return cmd diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index f516695c7..813aebbae 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -6,6 +6,8 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/schema" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/jsonschema" "github.com/spf13/cobra" ) @@ -13,6 +15,7 @@ func newSchemaCommand() *cobra.Command { cmd := &cobra.Command{ Use: "schema", Short: "Generate JSON Schema for bundle configuration", + Args: root.NoArgs, } cmd.RunE = func(cmd *cobra.Command, args []string) error { @@ -28,6 +31,13 @@ func newSchemaCommand() *cobra.Command { return err } + // Target variable value overrides can be primitives, maps or sequences. + // Set an empty schema for them. + err = schema.SetByPath("targets.*.variables.*", jsonschema.Schema{}) + if err != nil { + return err + } + // Print the JSON schema to stdout. 
result, err := json.MarshalIndent(schema, "", " ") if err != nil { diff --git a/cmd/bundle/summary.go b/cmd/bundle/summary.go index efa3c679d..5a64b46c0 100644 --- a/cmd/bundle/summary.go +++ b/cmd/bundle/summary.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/deploy/terraform" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" @@ -19,8 +20,7 @@ func newSummaryCommand() *cobra.Command { cmd := &cobra.Command{ Use: "summary", Short: "Describe the bundle resources and their deployment states", - - PreRunE: ConfigureBundleWithVariables, + Args: root.NoArgs, // This command is currently intended for the Databricks VSCode extension only Hidden: true, @@ -30,29 +30,38 @@ func newSummaryCommand() *cobra.Command { cmd.Flags().BoolVar(&forcePull, "force-pull", false, "Skip local cache and load the state from the remote workspace") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } - err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { + diags = bundle.Apply(ctx, b, phases.Initialize()) + if err := diags.Error(); err != nil { return err } - cacheDir, err := terraform.Dir(cmd.Context(), b) + cacheDir, err := terraform.Dir(ctx, b) if err != nil { return err } - _, err = os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName)) - noCache := errors.Is(err, os.ErrNotExist) + _, stateFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformStateFileName)) + _, configFileErr := os.Stat(filepath.Join(cacheDir, terraform.TerraformConfigFileName)) + noCache := errors.Is(stateFileErr, os.ErrNotExist) || errors.Is(configFileErr, os.ErrNotExist) if forcePull || noCache { - err = bundle.Apply(cmd.Context(), b, terraform.StatePull()) - if err != nil { + diags = bundle.Apply(ctx, b, bundle.Seq( + terraform.StatePull(), + terraform.Interpolate(), + terraform.Write(), + )) + if err := diags.Error(); err != nil { return err } } - err = bundle.Apply(cmd.Context(), b, terraform.Load()) - if err != nil { + diags = bundle.Apply(ctx, b, terraform.Load()) + if err := diags.Error(); err != nil { return err } diff --git a/cmd/bundle/sync.go b/cmd/bundle/sync.go index ca81275b7..df3e087c2 100644 --- a/cmd/bundle/sync.go +++ b/cmd/bundle/sync.go @@ -5,7 +5,10 @@ import ( "time" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/sync" "github.com/spf13/cobra" @@ -18,37 +21,21 @@ type syncFlags struct { } func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, b *bundle.Bundle) (*sync.SyncOptions, error) { - cacheDir, err := b.CacheDir(cmd.Context()) + opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) if err != nil { - return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) + return nil, fmt.Errorf("cannot get sync options: %w", err) } - includes, err := b.GetSyncIncludePatterns(cmd.Context()) - if err != nil { - return nil, fmt.Errorf("cannot get list of sync includes: %w", err) - } - - opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: 
b.Config.Workspace.FilePath, - Include: includes, - Exclude: b.Config.Sync.Exclude, - Full: f.full, - PollInterval: f.interval, - - SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), - } - return &opts, nil + opts.Full = f.full + opts.PollInterval = f.interval + return opts, nil } func newSyncCommand() *cobra.Command { cmd := &cobra.Command{ Use: "sync [flags]", Short: "Synchronize bundle tree to the workspace", - Args: cobra.NoArgs, - - PreRunE: ConfigureBundleWithVariables, + Args: root.NoArgs, } var f syncFlags @@ -57,11 +44,15 @@ func newSyncCommand() *cobra.Command { cmd.Flags().BoolVar(&f.watch, "watch", false, "watch local file system for changes") cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) + if err := diags.Error(); err != nil { + return diags.Error() + } // Run initialize phase to make sure paths are set. - err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { + diags = bundle.Apply(ctx, b, phases.Initialize()) + if err := diags.Error(); err != nil { return err } @@ -70,7 +61,6 @@ func newSyncCommand() *cobra.Command { return err } - ctx := cmd.Context() s, err := sync.New(ctx, *opts) if err != nil { return err @@ -82,7 +72,8 @@ func newSyncCommand() *cobra.Command { return s.RunContinuous(ctx) } - return s.RunOnce(ctx) + _, err = s.RunOnce(ctx) + return err } return cmd diff --git a/cmd/bundle/test.go b/cmd/bundle/test.go index ea1a4b716..4d30e727d 100644 --- a/cmd/bundle/test.go +++ b/cmd/bundle/test.go @@ -3,7 +3,6 @@ package bundle import ( "fmt" - "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) @@ -15,8 +14,6 @@ func newTestCommand() *cobra.Command { // We're not ready to expose this command until we specify its semantics. 
Hidden: true, - - PreRunE: root.MustConfigureBundle, } cmd.RunE = func(cmd *cobra.Command, args []string) error { diff --git a/cmd/bundle/utils/utils.go b/cmd/bundle/utils/utils.go new file mode 100644 index 000000000..ce3774cf5 --- /dev/null +++ b/cmd/bundle/utils/utils.go @@ -0,0 +1,35 @@ +package utils + +import ( + "context" + + "github.com/databricks/cli/bundle" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" + "github.com/spf13/cobra" +) + +func configureVariables(cmd *cobra.Command, b *bundle.Bundle, variables []string) diag.Diagnostics { + return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + err := b.Config.InitializeVariables(variables) + return diag.FromErr(err) + }) +} + +func ConfigureBundleWithVariables(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { + // Load bundle config and apply target + b, diags := root.MustConfigureBundle(cmd) + if diags.HasError() { + return b, diags + } + + variables, err := cmd.Flags().GetStringSlice("var") + if err != nil { + return b, diag.FromErr(err) + } + + // Initialize variables by assigning them values passed as command line flags + diags = diags.Extend(configureVariables(cmd, b, variables)) + + return b, diags +} diff --git a/cmd/bundle/validate.go b/cmd/bundle/validate.go index b98cbd52d..496d5d2b5 100644 --- a/cmd/bundle/validate.go +++ b/cmd/bundle/validate.go @@ -2,34 +2,73 @@ package bundle import ( "encoding/json" + "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/config/validate" "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/bundle/render" + "github.com/databricks/cli/cmd/bundle/utils" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" + "github.com/databricks/cli/libs/flags" "github.com/spf13/cobra" ) +func renderJsonOutput(cmd *cobra.Command, b *bundle.Bundle, diags diag.Diagnostics) error { + buf, err := json.MarshalIndent(b.Config.Value().AsAny(), "", " ") + if err != nil { + return err + } + cmd.OutOrStdout().Write(buf) + return diags.Error() +} + func newValidateCommand() *cobra.Command { cmd := &cobra.Command{ Use: "validate", Short: "Validate configuration", - - PreRunE: ConfigureBundleWithVariables, + Args: root.NoArgs, } cmd.RunE = func(cmd *cobra.Command, args []string) error { - b := bundle.Get(cmd.Context()) + ctx := cmd.Context() + b, diags := utils.ConfigureBundleWithVariables(cmd) - err := bundle.Apply(cmd.Context(), b, phases.Initialize()) - if err != nil { - return err + if b == nil { + if err := diags.Error(); err != nil { + return diags.Error() + } else { + return fmt.Errorf("invariant failed: returned bundle is nil") + } } - buf, err := json.MarshalIndent(b.Config, "", " ") - if err != nil { - return err + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, phases.Initialize())) + } + + if !diags.HasError() { + diags = diags.Extend(bundle.Apply(ctx, b, validate.Validate())) + } + + switch root.OutputType(cmd) { + case flags.OutputText: + renderOpts := render.RenderOptions{RenderSummaryTable: true} + err := render.RenderTextOutput(cmd.OutOrStdout(), b, diags, renderOpts) + if err != nil { + return fmt.Errorf("failed to render output: %w", err) + } + + if diags.HasError() { + return root.ErrAlreadyPrinted + } + + return nil + case flags.OutputJSON: + return renderJsonOutput(cmd, b, diags) + default: + return fmt.Errorf("unknown output type %s", root.OutputType(cmd)) } - cmd.OutOrStdout().Write(buf) - return nil } return cmd diff 
--git a/cmd/bundle/variables.go b/cmd/bundle/variables.go index c3e4af645..f8f5167ea 100644 --- a/cmd/bundle/variables.go +++ b/cmd/bundle/variables.go @@ -1,28 +1,9 @@ package bundle import ( - "github.com/databricks/cli/bundle" - "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) -func ConfigureBundleWithVariables(cmd *cobra.Command, args []string) error { - // Load bundle config and apply target - err := root.MustConfigureBundle(cmd, args) - if err != nil { - return err - } - - variables, err := cmd.Flags().GetStringSlice("var") - if err != nil { - return err - } - - // Initialize variables by assigning them values passed as command line flags - b := bundle.Get(cmd.Context()) - return b.Config.InitializeVariables(variables) -} - func initVariableFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringSlice("var", []string{}, `set values for variables defined in bundle config. Example: --var="foo=bar"`) } diff --git a/cmd/configure/configure.go b/cmd/configure/configure.go index 1e94ddae8..895a5902c 100644 --- a/cmd/configure/configure.go +++ b/cmd/configure/configure.go @@ -139,10 +139,11 @@ The host must be specified with the --host flag or the DATABRICKS_HOST environme // Save profile to config file. return databrickscfg.SaveToProfile(ctx, &config.Config{ - Profile: cfg.Profile, - Host: cfg.Host, - Token: cfg.Token, - ClusterID: cfg.ClusterID, + Profile: cfg.Profile, + Host: cfg.Host, + Token: cfg.Token, + ClusterID: cfg.ClusterID, + ConfigFile: cfg.ConfigFile, }) } diff --git a/cmd/configure/configure_test.go b/cmd/configure/configure_test.go index 259c83adb..a127fe57a 100644 --- a/cmd/configure/configure_test.go +++ b/cmd/configure/configure_test.go @@ -78,7 +78,8 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { //TODO: Replace with similar test code from go SDK, once we start using it directly ctx := context.Background() tempHomeDir := setup(t) - cfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + defaultCfgPath := filepath.Join(tempHomeDir, ".databrickscfg") + cfgPath := filepath.Join(tempHomeDir, "overwrite-databricks-cfg") t.Setenv("DATABRICKS_CONFIG_FILE", cfgPath) inp := getTempFileWithContent(t, tempHomeDir, "token\n") @@ -96,6 +97,13 @@ func TestConfigFileFromEnvNoInteractive(t *testing.T) { _, err = os.Stat(cfgPath) assert.NoError(t, err) + _, err = os.Stat(defaultCfgPath) + if runtime.GOOS == "windows" { + assert.ErrorContains(t, err, "cannot find the file specified") + } else { + assert.ErrorContains(t, err, "no such file or directory") + } + cfg, err := ini.Load(cfgPath) assert.NoError(t, err) diff --git a/cmd/fs/cat.go b/cmd/fs/cat.go index 8227cd781..7a6f42cba 100644 --- a/cmd/fs/cat.go +++ b/cmd/fs/cat.go @@ -9,9 +9,9 @@ import ( func newCatCommand() *cobra.Command { cmd := &cobra.Command{ Use: "cat FILE_PATH", - Short: "Show file content", - Long: `Show the contents of a file.`, - Args: cobra.ExactArgs(1), + Short: "Show file content.", + Long: `Show the contents of a file in DBFS or a UC Volume.`, + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } @@ -27,7 +27,7 @@ func newCatCommand() *cobra.Command { if err != nil { return err } - return cmdio.RenderReader(ctx, r) + return cmdio.Render(ctx, r) } return cmd diff --git a/cmd/fs/cp.go b/cmd/fs/cp.go index 97fceb93c..52feb8905 100644 --- a/cmd/fs/cp.go +++ b/cmd/fs/cp.go @@ -107,7 +107,7 @@ func (c *copy) emitFileSkippedEvent(sourcePath, targetPath string) error { event := newFileSkippedEvent(fullSourcePath, fullTargetPath) template := "{{.SourcePath}} -> {{.TargetPath}} 
(skipped; already exists)\n" - return cmdio.RenderWithTemplate(c.ctx, event, template) + return cmdio.RenderWithTemplate(c.ctx, event, "", template) } func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error { @@ -123,16 +123,16 @@ func (c *copy) emitFileCopiedEvent(sourcePath, targetPath string) error { event := newFileCopiedEvent(fullSourcePath, fullTargetPath) template := "{{.SourcePath}} -> {{.TargetPath}}\n" - return cmdio.RenderWithTemplate(c.ctx, event, template) + return cmdio.RenderWithTemplate(c.ctx, event, "", template) } func newCpCommand() *cobra.Command { cmd := &cobra.Command{ Use: "cp SOURCE_PATH TARGET_PATH", - Short: "Copy files and directories to and from DBFS.", - Long: `Copy files to and from DBFS. + Short: "Copy files and directories.", + Long: `Copy files and directories to and from any paths on DBFS, UC Volumes or your local filesystem. - For paths in DBFS it is required that you specify the "dbfs" scheme. + For paths in DBFS and UC Volumes, it is required that you specify the "dbfs" scheme. For example: dbfs:/foo/bar. Recursively copying a directory will copy all files inside directory @@ -141,7 +141,7 @@ func newCpCommand() *cobra.Command { When copying a file, if TARGET_PATH is a directory, the file will be created inside the directory, otherwise the file is created at TARGET_PATH. `, - Args: cobra.ExactArgs(2), + Args: root.ExactArgs(2), PreRunE: root.MustWorkspaceClient, } @@ -152,9 +152,6 @@ func newCpCommand() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) error { ctx := cmd.Context() - // TODO: Error if a user uses '\' as path separator on windows when "file" - // scheme is specified (https://github.com/databricks/cli/issues/485) - // Get source filer and source path without scheme fullSourcePath := args[0] sourceFiler, sourcePath, err := filerForPath(ctx, fullSourcePath) diff --git a/cmd/fs/fs.go b/cmd/fs/fs.go index 01d8a745b..1f36696a6 100644 --- a/cmd/fs/fs.go +++ b/cmd/fs/fs.go @@ -8,7 +8,7 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "fs", Short: "Filesystem related commands", - Long: `Commands to do DBFS operations.`, + Long: `Commands to do file system operations on DBFS and UC Volumes.`, GroupID: "workspace", } diff --git a/cmd/fs/ls.go b/cmd/fs/ls.go index 7ae55e1f4..cec9b98ba 100644 --- a/cmd/fs/ls.go +++ b/cmd/fs/ls.go @@ -40,9 +40,9 @@ func toJsonDirEntry(f fs.DirEntry, baseDir string, isAbsolute bool) (*jsonDirEnt func newLsCommand() *cobra.Command { cmd := &cobra.Command{ Use: "ls DIR_PATH", - Short: "Lists files", - Long: `Lists files`, - Args: cobra.ExactArgs(1), + Short: "Lists files.", + Long: `Lists files in DBFS and UC Volumes.`, + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } @@ -78,12 +78,12 @@ func newLsCommand() *cobra.Command { // Use template for long mode if the flag is set if long { - return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(` + return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(` {{range .}}{{if .IsDir}}DIRECTORY {{else}}FILE {{end}}{{.Size}} {{.ModTime|pretty_date}} {{.Name}} {{end}} `)) } - return cmdio.RenderWithTemplate(ctx, jsonDirEntries, cmdio.Heredoc(` + return cmdio.RenderWithTemplate(ctx, jsonDirEntries, "", cmdio.Heredoc(` {{range .}}{{.Name}} {{end}} `)) diff --git a/cmd/fs/mkdir.go b/cmd/fs/mkdir.go index c6a5e607c..074a7543d 100644 --- a/cmd/fs/mkdir.go +++ b/cmd/fs/mkdir.go @@ -11,9 +11,9 @@ func newMkdirCommand() *cobra.Command { // Alias `mkdirs` for this command exists for legacy purposes. 
This command // is called databricks fs mkdirs in our legacy CLI: https://github.com/databricks/databricks-cli Aliases: []string{"mkdirs"}, - Short: "Make directories", - Long: `Mkdir will create directories along the path to the argument directory.`, - Args: cobra.ExactArgs(1), + Short: "Make directories.", + Long: `Make directories in DBFS and UC Volumes. Mkdir will create directories along the path to the argument directory.`, + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/fs/rm.go b/cmd/fs/rm.go index 3ce8d3b93..5f2904e71 100644 --- a/cmd/fs/rm.go +++ b/cmd/fs/rm.go @@ -9,9 +9,9 @@ import ( func newRmCommand() *cobra.Command { cmd := &cobra.Command{ Use: "rm PATH", - Short: "Remove files and directories from dbfs.", - Long: `Remove files and directories from dbfs.`, - Args: cobra.ExactArgs(1), + Short: "Remove files and directories.", + Long: `Remove files and directories from DBFS and UC Volumes.`, + Args: root.ExactArgs(1), PreRunE: root.MustWorkspaceClient, } diff --git a/cmd/labs/install.go b/cmd/labs/install.go index 31db43892..6ed6b2e91 100644 --- a/cmd/labs/install.go +++ b/cmd/labs/install.go @@ -2,13 +2,14 @@ package labs import ( "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) func newInstallCommand() *cobra.Command { return &cobra.Command{ Use: "install NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Installs project", RunE: func(cmd *cobra.Command, args []string) error { inst, err := project.NewInstaller(cmd, args[0]) diff --git a/cmd/labs/project/entrypoint.go b/cmd/labs/project/entrypoint.go index 96f46d4b5..99edf83c8 100644 --- a/cmd/labs/project/entrypoint.go +++ b/cmd/labs/project/entrypoint.go @@ -10,7 +10,6 @@ import ( "path/filepath" "strings" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" @@ -203,11 +202,11 @@ func (e *Entrypoint) getLoginConfig(cmd *cobra.Command) (*loginConfig, *config.C return lc, cfg, nil } if e.IsBundleAware { - err = root.TryConfigureBundle(cmd, []string{}) - if err != nil { + b, diags := root.TryConfigureBundle(cmd) + if err := diags.Error(); err != nil { return nil, nil, fmt.Errorf("bundle: %w", err) } - if b := bundle.GetOrNil(cmd.Context()); b != nil { + if b != nil { log.Infof(ctx, "Using login configuration from Databricks Asset Bundle") return &loginConfig{}, b.WorkspaceClient().Config, nil } diff --git a/cmd/labs/project/installed.go b/cmd/labs/project/installed.go index 9a98a780c..fb349531b 100644 --- a/cmd/labs/project/installed.go +++ b/cmd/labs/project/installed.go @@ -8,8 +8,8 @@ import ( "os" "path/filepath" - "github.com/databricks/cli/folders" "github.com/databricks/cli/libs/env" + "github.com/databricks/cli/libs/folders" "github.com/databricks/cli/libs/log" ) diff --git a/cmd/labs/project/installer.go b/cmd/labs/project/installer.go index 235d29bc4..92dfe9e7c 100644 --- a/cmd/labs/project/installer.go +++ b/cmd/labs/project/installer.go @@ -11,8 +11,8 @@ import ( "github.com/databricks/cli/cmd/labs/github" "github.com/databricks/cli/cmd/labs/unpack" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" "github.com/databricks/cli/libs/databrickscfg/cfgpickers" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/process" "github.com/databricks/cli/libs/python" @@ -89,7 +89,7 @@ func (i *installer) 
Install(ctx context.Context) error { return err } w, err := i.login(ctx) - if err != nil && errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && errors.Is(err, profile.ErrNoConfiguration) { cfg, err := i.Installer.envAwareConfig(ctx) if err != nil { return err @@ -136,6 +136,10 @@ func (i *installer) Upgrade(ctx context.Context) error { if err != nil { return fmt.Errorf("installer: %w", err) } + err = i.installPythonDependencies(ctx, ".") + if err != nil { + return fmt.Errorf("python dependencies: %w", err) + } return nil } diff --git a/cmd/labs/project/installer_test.go b/cmd/labs/project/installer_test.go index 709e14f20..0e049b4c0 100644 --- a/cmd/labs/project/installer_test.go +++ b/cmd/labs/project/installer_test.go @@ -403,6 +403,12 @@ func TestUpgraderWorksForReleases(t *testing.T) { newHome := copyTestdata(t, "testdata/installed-in-home") ctx = env.WithUserHomeDir(ctx, newHome) + // Install stubs for the python calls we need to ensure were run in the + // upgrade process. + ctx, stub := process.WithStub(ctx) + stub.WithStderrFor(`python[\S]+ -m pip install .`, "[mock pip install]") + stub.WithStdoutFor(`python[\S]+ install.py`, "setting up important infrastructure") + py, _ := python.DetectExecutable(ctx) py, _ = filepath.Abs(py) ctx = env.Set(ctx, "PYTHON_BIN", py) @@ -420,4 +426,17 @@ func TestUpgraderWorksForReleases(t *testing.T) { r := internal.NewCobraTestRunnerWithContext(t, ctx, "labs", "upgrade", "blueprint") r.RunAndExpectOutput("setting up important infrastructure") + + // Check if the stub was called with the 'python -m pip install' command + pi := false + for _, call := range stub.Commands() { + if strings.HasSuffix(call, "-m pip install .") { + pi = true + break + } + } + if !pi { + t.Logf(`Expected stub command 'python[\S]+ -m pip install .' 
not found`) + t.FailNow() + } } diff --git a/cmd/labs/project/proxy.go b/cmd/labs/project/proxy.go index d872560a5..ee1b0aa91 100644 --- a/cmd/labs/project/proxy.go +++ b/cmd/labs/project/proxy.go @@ -87,7 +87,7 @@ func (cp *proxy) renderJsonAsTable(cmd *cobra.Command, args []string, envs map[s } // IntelliJ eagerly replaces tabs with spaces, even though we're not asking for it fixedTemplate := strings.ReplaceAll(cp.TableTemplate, "\\t", "\t") - return cmdio.RenderWithTemplate(ctx, anyVal, fixedTemplate) + return cmdio.RenderWithTemplate(ctx, anyVal, "", fixedTemplate) } func (cp *proxy) commandInput(cmd *cobra.Command) ([]string, error) { diff --git a/cmd/labs/show.go b/cmd/labs/show.go index 1ae6498c8..c36f0bda3 100644 --- a/cmd/labs/show.go +++ b/cmd/labs/show.go @@ -4,6 +4,7 @@ import ( "fmt" "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/spf13/cobra" ) @@ -11,7 +12,7 @@ import ( func newShowCommand() *cobra.Command { return &cobra.Command{ Use: "show NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Shows information about the project", Annotations: map[string]string{ "template": cmdio.Heredoc(` diff --git a/cmd/labs/uninstall.go b/cmd/labs/uninstall.go index b2c83fff7..424df38db 100644 --- a/cmd/labs/uninstall.go +++ b/cmd/labs/uninstall.go @@ -4,13 +4,14 @@ import ( "fmt" "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) func newUninstallCommand() *cobra.Command { return &cobra.Command{ Use: "uninstall NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Uninstalls project", ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { var names []string diff --git a/cmd/labs/upgrade.go b/cmd/labs/upgrade.go index 88b7bc928..d0a8dec9b 100644 --- a/cmd/labs/upgrade.go +++ b/cmd/labs/upgrade.go @@ -2,13 +2,14 @@ package labs import ( "github.com/databricks/cli/cmd/labs/project" + "github.com/databricks/cli/cmd/root" "github.com/spf13/cobra" ) func newUpgradeCommand() *cobra.Command { return &cobra.Command{ Use: "upgrade NAME", - Args: cobra.ExactArgs(1), + Args: root.ExactArgs(1), Short: "Upgrades project", RunE: func(cmd *cobra.Command, args []string) error { inst, err := project.NewUpgrader(cmd, args[0]) diff --git a/cmd/root/args.go b/cmd/root/args.go new file mode 100644 index 000000000..800d6add7 --- /dev/null +++ b/cmd/root/args.go @@ -0,0 +1,45 @@ +package root + +import ( + "fmt" + + "github.com/spf13/cobra" +) + +type InvalidArgsError struct { + // The command that was run. + Command *cobra.Command + // The error message. 
+ Message string +} + +func (e *InvalidArgsError) Error() string { + return fmt.Sprintf("%s\n\n%s", e.Message, e.Command.UsageString()) +} + +func ExactArgs(n int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) != n { + return &InvalidArgsError{Message: fmt.Sprintf("accepts %d arg(s), received %d", n, len(args)), Command: cmd} + } + return nil + } +} + +func NoArgs(cmd *cobra.Command, args []string) error { + if len(args) > 0 { + msg := fmt.Sprintf("unknown command %q for %q", args[0], cmd.CommandPath()) + return &InvalidArgsError{Message: msg, Command: cmd} + } + return nil +} + +func MaximumNArgs(n int) cobra.PositionalArgs { + return func(cmd *cobra.Command, args []string) error { + if len(args) > n { + msg := fmt.Sprintf("accepts at most %d arg(s), received %d", n, len(args)) + return &InvalidArgsError{Message: msg, Command: cmd} + } + return nil + } +} diff --git a/cmd/root/auth.go b/cmd/root/auth.go index a2cdd04fe..107679105 100644 --- a/cmd/root/auth.go +++ b/cmd/root/auth.go @@ -6,9 +6,8 @@ import ( "fmt" "net/http" - "github.com/databricks/cli/bundle" "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/databrickscfg" + "github.com/databricks/cli/libs/databrickscfg/profile" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/config" "github.com/manifoldco/promptui" @@ -18,10 +17,27 @@ import ( // Placeholders to use as unique keys in context.Context. var workspaceClient int var accountClient int +var configUsed int + +type ErrNoWorkspaceProfiles struct { + path string +} + +func (e ErrNoWorkspaceProfiles) Error() string { + return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path) +} + +type ErrNoAccountProfiles struct { + path string +} + +func (e ErrNoAccountProfiles) Error() string { + return fmt.Sprintf("%s does not contain account profiles", e.path) +} func initProfileFlag(cmd *cobra.Command) { cmd.PersistentFlags().StringP("profile", "p", "", "~/.databrickscfg profile") - cmd.RegisterFlagCompletionFunc("profile", databrickscfg.ProfileCompletion) + cmd.RegisterFlagCompletionFunc("profile", profile.ProfileCompletion) } func profileFlagValue(cmd *cobra.Command) (string, bool) { @@ -68,27 +84,56 @@ func accountClientOrPrompt(ctx context.Context, cfg *config.Config, allowPrompt return a, err } +func MustAnyClient(cmd *cobra.Command, args []string) (bool, error) { + // Try to create a workspace client + werr := MustWorkspaceClient(cmd, args) + if werr == nil { + return false, nil + } + + // If the error is other than "not a workspace client error" or "no workspace profiles", + // return it because configuration is for workspace client + // and we don't want to try to create an account client. + if !errors.Is(werr, databricks.ErrNotWorkspaceClient) && !errors.As(werr, &ErrNoWorkspaceProfiles{}) { + return false, werr + } + + // Otherwise, the config used is account client one, so try to create an account client + aerr := MustAccountClient(cmd, args) + if errors.As(aerr, &ErrNoAccountProfiles{}) { + return false, aerr + } + + return true, aerr +} + func MustAccountClient(cmd *cobra.Command, args []string) error { cfg := &config.Config{} // The command-line profile flag takes precedence over DATABRICKS_CONFIG_PROFILE. 
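The new cmd/root/args.go validators above replace cobra's stock checks so that an argument-count failure also carries the command's usage text. A minimal, self-contained sketch of the same pattern follows; the names here (exactArgs, invalidArgsError, the demo command) are illustrative, not the CLI's own:

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// invalidArgsError mirrors the idea of root.InvalidArgsError: the message is
// rendered together with the command's usage string.
type invalidArgsError struct {
	command *cobra.Command
	message string
}

func (e *invalidArgsError) Error() string {
	return fmt.Sprintf("%s\n\n%s", e.message, e.command.UsageString())
}

// exactArgs is a cobra.PositionalArgs validator in the style of root.ExactArgs.
func exactArgs(n int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) != n {
			return &invalidArgsError{
				command: cmd,
				message: fmt.Sprintf("accepts %d arg(s), received %d", n, len(args)),
			}
		}
		return nil
	}
}

func main() {
	cmd := &cobra.Command{
		Use:           "show NAME",
		Args:          exactArgs(1),
		RunE:          func(*cobra.Command, []string) error { return nil },
		SilenceErrors: true, // print the error ourselves below
		SilenceUsage:  true,
	}
	cmd.SetArgs([]string{}) // no NAME supplied
	// Execute returns the validator's error; its message embeds the usage block.
	if err := cmd.Execute(); err != nil {
		fmt.Println(err)
	}
}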
- profile, hasProfileFlag := profileFlagValue(cmd) + pr, hasProfileFlag := profileFlagValue(cmd) if hasProfileFlag { - cfg.Profile = profile + cfg.Profile = pr } + ctx := cmd.Context() + ctx = context.WithValue(ctx, &configUsed, cfg) + cmd.SetContext(ctx) + + profiler := profile.GetProfiler(ctx) + if cfg.Profile == "" { // account-level CLI was not really done before, so here are the assumptions: // 1. only admins will have account configured // 2. 99% of admins will have access to just one account // hence, we don't need to create a special "DEFAULT_ACCOUNT" profile yet - _, profiles, err := databrickscfg.LoadProfiles(cmd.Context(), databrickscfg.MatchAccountProfiles) + profiles, err := profiler.LoadProfiles(cmd.Context(), profile.MatchAccountProfiles) if err == nil && len(profiles) == 1 { cfg.Profile = profiles[0].Name } // if there is no config file, we don't want to fail and instead just skip it - if err != nil && !errors.Is(err, databrickscfg.ErrNoConfiguration) { + if err != nil && !errors.Is(err, profile.ErrNoConfiguration) { return err } } @@ -99,7 +144,8 @@ func MustAccountClient(cmd *cobra.Command, args []string) error { return err } - cmd.SetContext(context.WithValue(cmd.Context(), &accountClient, a)) + ctx = context.WithValue(ctx, &accountClient, a) + cmd.SetContext(ctx) return nil } @@ -147,13 +193,20 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { cfg.Profile = profile } + ctx := cmd.Context() + ctx = context.WithValue(ctx, &configUsed, cfg) + cmd.SetContext(ctx) + // Try to load a bundle configuration if we're allowed to by the caller (see `./auth_options.go`). if !shouldSkipLoadBundle(cmd.Context()) { - err := TryConfigureBundle(cmd, args) - if err != nil { + b, diags := TryConfigureBundle(cmd) + if err := diags.Error(); err != nil { return err } - if b := bundle.GetOrNil(cmd.Context()); b != nil { + + if b != nil { + ctx = context.WithValue(ctx, &configUsed, b.Config.Workspace.Config()) + cmd.SetContext(ctx) client, err := b.InitializeWorkspaceClient() if err != nil { return err @@ -168,7 +221,6 @@ func MustWorkspaceClient(cmd *cobra.Command, args []string) error { return err } - ctx := cmd.Context() ctx = context.WithValue(ctx, &workspaceClient, w) cmd.SetContext(ctx) return nil @@ -178,23 +230,28 @@ func SetWorkspaceClient(ctx context.Context, w *databricks.WorkspaceClient) cont return context.WithValue(ctx, &workspaceClient, w) } +func SetAccountClient(ctx context.Context, a *databricks.AccountClient) context.Context { + return context.WithValue(ctx, &accountClient, a) +} + func AskForWorkspaceProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath(ctx) + profiler := profile.GetProfiler(ctx) + path, err := profiler.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchWorkspaceProfiles) + profiles, err := profiler.LoadProfiles(ctx, profile.MatchWorkspaceProfiles) if err != nil { return "", err } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain workspace profiles; please create one first", path) + return "", ErrNoWorkspaceProfiles{path: path} case 1: return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Workspace profiles defined in %s", file), + Label: fmt.Sprintf("Workspace profiles defined in %s", path), Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, @@ -212,22 +269,23 @@ 
func AskForWorkspaceProfile(ctx context.Context) (string, error) { } func AskForAccountProfile(ctx context.Context) (string, error) { - path, err := databrickscfg.GetPath(ctx) + profiler := profile.GetProfiler(ctx) + path, err := profiler.GetPath(ctx) if err != nil { return "", fmt.Errorf("cannot determine Databricks config file path: %w", err) } - file, profiles, err := databrickscfg.LoadProfiles(ctx, databrickscfg.MatchAccountProfiles) + profiles, err := profiler.LoadProfiles(ctx, profile.MatchAccountProfiles) if err != nil { return "", err } switch len(profiles) { case 0: - return "", fmt.Errorf("%s does not contain account profiles; please create one first", path) + return "", ErrNoAccountProfiles{path} case 1: return profiles[0].Name, nil } i, _, err := cmdio.RunSelect(ctx, &promptui.Select{ - Label: fmt.Sprintf("Account profiles defined in %s", file), + Label: fmt.Sprintf("Account profiles defined in %s", path), Items: profiles, Searcher: profiles.SearchCaseInsensitive, StartInSearchMode: true, @@ -270,3 +328,11 @@ func AccountClient(ctx context.Context) *databricks.AccountClient { } return a } + +func ConfigUsed(ctx context.Context) *config.Config { + cfg, ok := ctx.Value(&configUsed).(*config.Config) + if !ok { + panic("cannot get *config.Config. Please report it as a bug") + } + return cfg +} diff --git a/cmd/root/auth_test.go b/cmd/root/auth_test.go index 7864c254e..486f587ef 100644 --- a/cmd/root/auth_test.go +++ b/cmd/root/auth_test.go @@ -229,3 +229,81 @@ func TestMustAccountClientErrorsWithNoDatabricksCfg(t *testing.T) { err := MustAccountClient(cmd, []string{}) require.ErrorContains(t, err, "no configuration file found at") } + +func TestMustAnyClientCanCreateWorkspaceClient(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [workspace-1111] + host = https://adb-1111.11.azuredatabricks.net/ + token = foobar + `), + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + isAccount, err := MustAnyClient(cmd, []string{}) + require.False(t, isAccount) + require.NoError(t, err) + + w := WorkspaceClient(cmd.Context()) + require.NotNil(t, w) +} + +func TestMustAnyClientCanCreateAccountClient(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(` + [account-1111] + host = https://accounts.azuredatabricks.net/ + account_id = 1111 + token = foobar + `), + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + isAccount, err := MustAnyClient(cmd, []string{}) + require.NoError(t, err) + require.True(t, isAccount) + + a := AccountClient(cmd.Context()) + require.NotNil(t, a) +} + +func TestMustAnyClientWithEmptyDatabricksCfg(t *testing.T) { + testutil.CleanupEnvironment(t) + + dir := t.TempDir() + configFile := filepath.Join(dir, ".databrickscfg") + err := os.WriteFile( + configFile, + []byte(""), // empty file + 0755) + require.NoError(t, err) + + ctx, tt := cmdio.SetupTest(context.Background()) + t.Cleanup(tt.Done) + cmd := New(ctx) + + t.Setenv("DATABRICKS_CONFIG_FILE", configFile) + + _, err = MustAnyClient(cmd, []string{}) + require.ErrorContains(t, err, "does not contain account profiles") +} diff --git 
a/cmd/root/bundle.go b/cmd/root/bundle.go index 3f9d90db6..8b98f2cf2 100644 --- a/cmd/root/bundle.go +++ b/cmd/root/bundle.go @@ -4,8 +4,9 @@ import ( "context" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/env" + "github.com/databricks/cli/bundle/phases" + "github.com/databricks/cli/libs/diag" envlib "github.com/databricks/cli/libs/env" "github.com/spf13/cobra" "golang.org/x/exp/maps" @@ -49,81 +50,96 @@ func getProfile(cmd *cobra.Command) (value string) { return envlib.Get(cmd.Context(), "DATABRICKS_CONFIG_PROFILE") } -// loadBundle loads the bundle configuration and applies default mutators. -func loadBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) (*bundle.Bundle, error) { - ctx := cmd.Context() - b, err := load(ctx) - if err != nil { - return nil, err - } - - // No bundle is fine in case of `TryConfigureBundle`. - if b == nil { - return nil, nil - } - +// configureProfile applies the profile flag to the bundle. +func configureProfile(cmd *cobra.Command, b *bundle.Bundle) diag.Diagnostics { profile := getProfile(cmd) - if profile != "" { - b.Config.Workspace.Profile = profile - } - - err = bundle.Apply(ctx, b, bundle.Seq(mutator.DefaultMutators()...)) - if err != nil { - return nil, err - } - - return b, nil -} - -// configureBundle loads the bundle configuration and configures it on the command's context. -func configureBundle(cmd *cobra.Command, args []string, load func(ctx context.Context) (*bundle.Bundle, error)) error { - b, err := loadBundle(cmd, args, load) - if err != nil { - return err - } - - // No bundle is fine in case of `TryConfigureBundle`. - if b == nil { + if profile == "" { return nil } + return bundle.ApplyFunc(cmd.Context(), b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.Profile = profile + return nil + }) +} + +// configureBundle loads the bundle configuration and configures flag values, if any. +func configureBundle(cmd *cobra.Command, b *bundle.Bundle) (*bundle.Bundle, diag.Diagnostics) { var m bundle.Mutator - env := getTarget(cmd) - if env == "" { - m = mutator.SelectDefaultTarget() + if target := getTarget(cmd); target == "" { + m = phases.LoadDefaultTarget() } else { - m = mutator.SelectTarget(env) + m = phases.LoadNamedTarget(target) } + // Load bundle and select target. ctx := cmd.Context() - err = bundle.Apply(ctx, b, m) - if err != nil { - return err + diags := bundle.Apply(ctx, b, m) + if diags.HasError() { + return b, diags } - cmd.SetContext(bundle.Context(ctx, b)) - return nil + // Configure the workspace profile if the flag has been set. + diags = diags.Extend(configureProfile(cmd, b)) + return b, diags } // MustConfigureBundle configures a bundle on the command context. -func MustConfigureBundle(cmd *cobra.Command, args []string) error { - return configureBundle(cmd, args, bundle.MustLoad) +func MustConfigureBundle(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { + // A bundle may be configured on the context when testing. + // If it is, return it immediately. + b := bundle.GetOrNil(cmd.Context()) + if b != nil { + return b, nil + } + + b, err := bundle.MustLoad(cmd.Context()) + if err != nil { + return nil, diag.FromErr(err) + } + + return configureBundle(cmd, b) } // TryConfigureBundle configures a bundle on the command context // if there is one, but doesn't fail if there isn't one. 
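Callers of these helpers now receive the *bundle.Bundle and a diag.Diagnostics value directly, instead of fetching the bundle back out of the command context and checking a plain error (TryConfigureBundle's new signature follows just below). A hedged sketch of the resulting caller shape, using only calls that appear elsewhere in this diff; the wrapper function itself is illustrative:

package example

import (
	"fmt"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/databricks-sdk-go"
	"github.com/spf13/cobra"
)

// resolveBundleClient is a hypothetical helper showing how a command consumes
// the (bundle, diagnostics) pair returned by root.TryConfigureBundle.
func resolveBundleClient(cmd *cobra.Command) (*databricks.WorkspaceClient, error) {
	b, diags := root.TryConfigureBundle(cmd)
	if err := diags.Error(); err != nil {
		return nil, fmt.Errorf("bundle: %w", err)
	}
	if b == nil {
		// No bundle in the working tree; fall back to regular unified auth.
		return nil, nil
	}
	return b.InitializeWorkspaceClient()
}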
-func TryConfigureBundle(cmd *cobra.Command, args []string) error { - return configureBundle(cmd, args, bundle.TryLoad) +func TryConfigureBundle(cmd *cobra.Command) (*bundle.Bundle, diag.Diagnostics) { + // A bundle may be configured on the context when testing. + // If it is, return it immediately. + b := bundle.GetOrNil(cmd.Context()) + if b != nil { + return b, nil + } + + b, err := bundle.TryLoad(cmd.Context()) + if err != nil { + return nil, diag.FromErr(err) + } + + // No bundle is fine in this case. + if b == nil { + return nil, nil + } + + return configureBundle(cmd, b) } // targetCompletion executes to autocomplete the argument to the target flag. func targetCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - b, err := loadBundle(cmd, args, bundle.MustLoad) + ctx := cmd.Context() + b, err := bundle.MustLoad(ctx) if err != nil { cobra.CompErrorln(err.Error()) return nil, cobra.ShellCompDirectiveError } + // Load bundle but don't select a target (we're completing those). + diags := bundle.Apply(ctx, b, phases.Load()) + if err := diags.Error(); err != nil { + cobra.CompErrorln(err.Error()) + return nil, cobra.ShellCompDirectiveError + } + return maps.Keys(b.Config.Targets), cobra.ShellCompDirectiveDefault } diff --git a/cmd/root/bundle_test.go b/cmd/root/bundle_test.go index a3dec491d..301884287 100644 --- a/cmd/root/bundle_test.go +++ b/cmd/root/bundle_test.go @@ -2,16 +2,17 @@ package root import ( "context" + "fmt" "os" "path/filepath" "runtime" "testing" "github.com/databricks/cli/bundle" - "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/internal/testutil" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func setupDatabricksCfg(t *testing.T) { @@ -37,43 +38,61 @@ func emptyCommand(t *testing.T) *cobra.Command { return cmd } -func setup(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { +func setupWithHost(t *testing.T, cmd *cobra.Command, host string) *bundle.Bundle { setupDatabricksCfg(t) - err := configureBundle(cmd, []string{"validate"}, func(_ context.Context) (*bundle.Bundle, error) { - return &bundle.Bundle{ - Config: config.Root{ - Bundle: config.Bundle{ - Name: "test", - }, - Workspace: config.Workspace{ - Host: host, - }, - }, - }, nil - }) - assert.NoError(t, err) - return bundle.Get(cmd.Context()) + rootPath := t.TempDir() + testutil.Chdir(t, rootPath) + + contents := fmt.Sprintf(` +workspace: + host: %q +`, host) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + require.NoError(t, err) + + b, diags := MustConfigureBundle(cmd) + require.NoError(t, diags.Error()) + return b +} + +func setupWithProfile(t *testing.T, cmd *cobra.Command, profile string) *bundle.Bundle { + setupDatabricksCfg(t) + + rootPath := t.TempDir() + testutil.Chdir(t, rootPath) + + contents := fmt.Sprintf(` +workspace: + profile: %q +`, profile) + err := os.WriteFile(filepath.Join(rootPath, "databricks.yml"), []byte(contents), 0644) + require.NoError(t, err) + + b, diags := MustConfigureBundle(cmd) + require.NoError(t, diags.Error()) + return b } func TestBundleConfigureDefault(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - b := setup(t, cmd, "https://x.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://x.com") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://x.com", client.Config.Host) } func 
TestBundleConfigureWithMultipleMatches(t *testing.T) { testutil.CleanupEnvironment(t) cmd := emptyCommand(t) - b := setup(t, cmd, "https://a.com") - assert.Panics(t, func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://a.com") + + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "multiple profiles matched: PROFILE-1, PROFILE-2") } func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { @@ -81,11 +100,10 @@ func TestBundleConfigureWithNonExistentProfileFlag(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("NOEXIST") + b := setupWithHost(t, cmd, "https://x.com") - b := setup(t, cmd, "https://x.com") - assert.Panics(t, func() { - b.WorkspaceClient() - }) + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "has no NOEXIST profile configured") } func TestBundleConfigureWithMismatchedProfile(t *testing.T) { @@ -93,11 +111,10 @@ func TestBundleConfigureWithMismatchedProfile(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://x.com") - b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { - b.WorkspaceClient() - }) + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } func TestBundleConfigureWithCorrectProfile(t *testing.T) { @@ -105,35 +122,97 @@ func TestBundleConfigureWithCorrectProfile(t *testing.T) { cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://a.com") - b := setup(t, cmd, "https://a.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "PROFILE-1", client.Config.Profile) } func TestBundleConfigureWithMismatchedProfileEnvVariable(t *testing.T) { testutil.CleanupEnvironment(t) - t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-1") + t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-1") cmd := emptyCommand(t) - b := setup(t, cmd, "https://x.com") - assert.PanicsWithError(t, "cannot resolve bundle auth configuration: config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com", func() { - b.WorkspaceClient() - }) + b := setupWithHost(t, cmd, "https://x.com") + + _, err := b.InitializeWorkspaceClient() + assert.ErrorContains(t, err, "config host mismatch: profile uses host https://a.com, but CLI configured to use https://x.com") } func TestBundleConfigureWithProfileFlagAndEnvVariable(t *testing.T) { testutil.CleanupEnvironment(t) - t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") + t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") cmd := emptyCommand(t) cmd.Flag("profile").Value.Set("PROFILE-1") + b := setupWithHost(t, cmd, "https://a.com") - b := setup(t, cmd, "https://a.com") - assert.NotPanics(t, func() { - b.WorkspaceClient() - }) + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "PROFILE-1", client.Config.Profile) +} + +func TestBundleConfigureProfileDefault(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The profile in the databricks.yml file is used + cmd := emptyCommand(t) + b := setupWithProfile(t, cmd, 
"PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "a", client.Config.Token) + assert.Equal(t, "PROFILE-1", client.Config.Profile) +} + +func TestBundleConfigureProfileFlag(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The --profile flag takes precedence over the profile in the databricks.yml file + cmd := emptyCommand(t) + cmd.Flag("profile").Value.Set("PROFILE-2") + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) +} + +func TestBundleConfigureProfileEnvVariable(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The DATABRICKS_CONFIG_PROFILE environment variable takes precedence over the profile in the databricks.yml file + t.Setenv("DATABRICKS_CONFIG_PROFILE", "PROFILE-2") + cmd := emptyCommand(t) + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) +} + +func TestBundleConfigureProfileFlagAndEnvVariable(t *testing.T) { + testutil.CleanupEnvironment(t) + + // The --profile flag takes precedence over the DATABRICKS_CONFIG_PROFILE environment variable + t.Setenv("DATABRICKS_CONFIG_PROFILE", "NOEXIST") + cmd := emptyCommand(t) + cmd.Flag("profile").Value.Set("PROFILE-2") + b := setupWithProfile(t, cmd, "PROFILE-1") + + client, err := b.InitializeWorkspaceClient() + require.NoError(t, err) + assert.Equal(t, "https://a.com", client.Config.Host) + assert.Equal(t, "b", client.Config.Token) + assert.Equal(t, "PROFILE-2", client.Config.Profile) } func TestTargetFlagFull(t *testing.T) { @@ -145,7 +224,7 @@ func TestTargetFlagFull(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "development") + assert.Equal(t, "development", getTarget(cmd)) } func TestTargetFlagShort(t *testing.T) { @@ -157,7 +236,7 @@ func TestTargetFlagShort(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "production") + assert.Equal(t, "production", getTarget(cmd)) } // TODO: remove when environment flag is fully deprecated @@ -171,5 +250,5 @@ func TestTargetEnvironmentFlag(t *testing.T) { err := cmd.ExecuteContext(ctx) assert.NoError(t, err) - assert.Equal(t, getTarget(cmd), "development") + assert.Equal(t, "development", getTarget(cmd)) } diff --git a/cmd/root/io.go b/cmd/root/io.go index 23c7d6c64..b224bbb27 100644 --- a/cmd/root/io.go +++ b/cmd/root/io.go @@ -38,13 +38,14 @@ func OutputType(cmd *cobra.Command) flags.Output { } func (f *outputFlag) initializeIO(cmd *cobra.Command) error { - var template string + var headerTemplate, template string if cmd.Annotations != nil { // rely on zeroval being an empty string template = cmd.Annotations["template"] + headerTemplate = cmd.Annotations["headerTemplate"] } - cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), template) + cmdIO := cmdio.NewIO(f.output, cmd.InOrStdin(), cmd.OutOrStdout(), cmd.ErrOrStderr(), headerTemplate, template) ctx := cmdio.InContext(cmd.Context(), cmdIO) cmd.SetContext(ctx) return nil diff --git a/cmd/root/root.go b/cmd/root/root.go index 
38eb42ccb..eda873d12 100644 --- a/cmd/root/root.go +++ b/cmd/root/root.go @@ -2,6 +2,7 @@ package root import ( "context" + "errors" "fmt" "os" "strings" @@ -91,13 +92,12 @@ func flagErrorFunc(c *cobra.Command, err error) error { // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. -func Execute(cmd *cobra.Command) { +func Execute(ctx context.Context, cmd *cobra.Command) error { // TODO: deferred panic recovery - ctx := context.Background() // Run the command cmd, err := cmd.ExecuteContextC(ctx) - if err != nil { + if err != nil && !errors.Is(err, ErrAlreadyPrinted) { // If cmdio logger initialization succeeds, then this function logs with the // initialized cmdio logger, otherwise with the default cmdio logger cmdio.LogError(cmd.Context(), err) @@ -117,7 +117,5 @@ func Execute(cmd *cobra.Command) { } } - if err != nil { - os.Exit(1) - } + return err } diff --git a/cmd/root/silent_err.go b/cmd/root/silent_err.go new file mode 100644 index 000000000..b361cc6b4 --- /dev/null +++ b/cmd/root/silent_err.go @@ -0,0 +1,7 @@ +package root + +import "errors" + +// ErrAlreadyPrinted is not printed to the user. It's used to signal that the command should exit with an error, +// but the error message was already printed. +var ErrAlreadyPrinted = errors.New("AlreadyPrinted") diff --git a/cmd/sync/sync.go b/cmd/sync/sync.go index c613e8ca1..bab451593 100644 --- a/cmd/sync/sync.go +++ b/cmd/sync/sync.go @@ -10,9 +10,11 @@ import ( "time" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/bundle/deploy/files" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/sync" + "github.com/databricks/cli/libs/vfs" "github.com/spf13/cobra" ) @@ -29,28 +31,14 @@ func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b * return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle") } - cacheDir, err := b.CacheDir(cmd.Context()) + opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b)) if err != nil { - return nil, fmt.Errorf("cannot get bundle cache directory: %w", err) + return nil, fmt.Errorf("cannot get sync options: %w", err) } - includes, err := b.GetSyncIncludePatterns(cmd.Context()) - if err != nil { - return nil, fmt.Errorf("cannot get list of sync includes: %w", err) - } - - opts := sync.SyncOptions{ - LocalPath: b.Config.Path, - RemotePath: b.Config.Workspace.FilePath, - Include: includes, - Exclude: b.Config.Sync.Exclude, - Full: f.full, - PollInterval: f.interval, - - SnapshotBasePath: cacheDir, - WorkspaceClient: b.WorkspaceClient(), - } - return &opts, nil + opts.Full = f.full + opts.PollInterval = f.interval + return opts, nil } func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*sync.SyncOptions, error) { @@ -59,7 +47,7 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn } opts := sync.SyncOptions{ - LocalPath: args[0], + LocalPath: vfs.MustNew(args[0]), RemotePath: args[1], Full: f.full, PollInterval: f.interval, @@ -78,7 +66,7 @@ func New() *cobra.Command { cmd := &cobra.Command{ Use: "sync [flags] SRC DST", Short: "Synchronize a local directory to a workspace directory", - Args: cobra.MaximumNArgs(2), + Args: root.MaximumNArgs(2), GroupID: "development", } @@ -147,7 +135,7 @@ func New() *cobra.Command { if f.watch { err = s.RunContinuous(ctx) } else { - err = s.RunOnce(ctx) + _, err = s.RunOnce(ctx) } 
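With the root.Execute change above, the function reports the error to its caller instead of exiting the process itself, and it skips re-logging root.ErrAlreadyPrinted since that error was, by convention, already printed. A sketch of the caller-side wiring this implies; the actual main.go adjustment is not part of this hunk, so treat the cmd.New call and the exit handling here as assumptions:

package main

import (
	"context"
	"os"

	"github.com/databricks/cli/cmd"
	"github.com/databricks/cli/cmd/root"
)

func main() {
	ctx := context.Background()
	// Execute logs the error (unless it is root.ErrAlreadyPrinted) and returns it;
	// deciding the process exit code is now the caller's job.
	if err := root.Execute(ctx, cmd.New(ctx)); err != nil {
		os.Exit(1)
	}
}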
s.Close() diff --git a/cmd/sync/sync_test.go b/cmd/sync/sync_test.go index 827c4d509..564aeae56 100644 --- a/cmd/sync/sync_test.go +++ b/cmd/sync/sync_test.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -16,9 +17,9 @@ import ( func TestSyncOptionsFromBundle(t *testing.T) { tempDir := t.TempDir() b := &bundle.Bundle{ + RootPath: tempDir, + BundleRoot: vfs.MustNew(tempDir), Config: config.Root{ - Path: tempDir, - Bundle: config.Bundle{ Target: "default", }, @@ -32,7 +33,7 @@ func TestSyncOptionsFromBundle(t *testing.T) { f := syncFlags{} opts, err := f.syncOptionsFromBundle(New(), []string{}, b) require.NoError(t, err) - assert.Equal(t, tempDir, opts.LocalPath) + assert.Equal(t, tempDir, opts.LocalPath.Native()) assert.Equal(t, "/Users/jane@doe.com/path", opts.RemotePath) assert.Equal(t, filepath.Join(tempDir, ".databricks", "bundle", "default"), opts.SnapshotBasePath) assert.NotNil(t, opts.WorkspaceClient) @@ -50,11 +51,14 @@ func TestSyncOptionsFromArgsRequiredTwoArgs(t *testing.T) { } func TestSyncOptionsFromArgs(t *testing.T) { + local := t.TempDir() + remote := "/remote" + f := syncFlags{} cmd := New() cmd.SetContext(root.SetWorkspaceClient(context.Background(), nil)) - opts, err := f.syncOptionsFromArgs(cmd, []string{"/local", "/remote"}) + opts, err := f.syncOptionsFromArgs(cmd, []string{local, remote}) require.NoError(t, err) - assert.Equal(t, "/local", opts.LocalPath) - assert.Equal(t, "/remote", opts.RemotePath) + assert.Equal(t, local, opts.LocalPath.Native()) + assert.Equal(t, remote, opts.RemotePath) } diff --git a/cmd/version/version.go b/cmd/version/version.go index 653fbb897..98881b910 100644 --- a/cmd/version/version.go +++ b/cmd/version/version.go @@ -1,6 +1,7 @@ package version import ( + "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/internal/build" "github.com/databricks/cli/libs/cmdio" "github.com/spf13/cobra" @@ -9,7 +10,7 @@ import ( func New() *cobra.Command { cmd := &cobra.Command{ Use: "version", - Args: cobra.NoArgs, + Args: root.NoArgs, Short: "Retrieve information about the current version of this CLI", Annotations: map[string]string{ "template": "Databricks CLI v{{.Version}}\n", diff --git a/cmd/workspace/alerts/alerts.go b/cmd/workspace/alerts/alerts.go index 773a34876..61c1e0eab 100755 --- a/cmd/workspace/alerts/alerts.go +++ b/cmd/workspace/alerts/alerts.go @@ -24,13 +24,25 @@ func New() *cobra.Command { Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies one or more users and/or notification destinations if the condition was met. Alerts can be scheduled using the sql_task type of - the Jobs API, e.g. :method:jobs/create.`, + the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -66,7 +78,12 @@ func newCreate() *cobra.Command { Creates an alert. An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, and notifies users or notification - destinations if the condition was met.` + destinations if the condition was met. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -103,12 +120,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -130,8 +141,13 @@ func newDelete() *cobra.Command { cmd.Long = `Delete an alert. Deletes an alert. Deleted alerts are no longer accessible and cannot be - restored. **Note:** Unlike queries and dashboards, alerts cannot be moved to - the trash.` + restored. **Note**: Unlike queries and dashboards, alerts cannot be moved to + the trash. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -178,12 +194,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -204,7 +214,12 @@ func newGet() *cobra.Command { cmd.Short = `Get an alert.` cmd.Long = `Get an alert. - Gets an alert.` + Gets an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -251,12 +266,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -272,7 +281,12 @@ func newList() *cobra.Command { cmd.Short = `Get alerts.` cmd.Long = `Get alerts. - Gets a list of alerts.` + Gets a list of alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -299,12 +313,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -329,12 +337,17 @@ func newUpdate() *cobra.Command { cmd.Short = `Update an alert.` cmd.Long = `Update an alert. - Updates an alert.` + Updates an alert. + + **Note**: A new version of the Databricks SQL API will soon be available. 
+ [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -372,10 +385,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Alerts diff --git a/cmd/workspace/apps/apps.go b/cmd/workspace/apps/apps.go index f0bd6acf8..1572d4f4b 100755 --- a/cmd/workspace/apps/apps.go +++ b/cmd/workspace/apps/apps.go @@ -4,6 +4,7 @@ package apps import ( "fmt" + "time" "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" @@ -19,10 +20,10 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ Use: "apps", - Short: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, - Long: `Lakehouse Apps run directly on a customer’s Databricks instance, integrate - with their data, use and extend Databricks services, and enable users to - interact through single sign-on.`, + Short: `Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.`, + Long: `Apps run directly on a customer’s Databricks instance, integrate with their + data, use and extend Databricks services, and enable users to interact through + single sign-on.`, GroupID: "serving", Annotations: map[string]string{ "package": "serving", @@ -32,6 +33,19 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeploy()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetDeployment()) + cmd.AddCommand(newGetEnvironment()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListDeployments()) + cmd.AddCommand(newStart()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -46,28 +60,49 @@ func New() *cobra.Command { // Functions can be added from the `init()` function in manually curated files in this directory. var createOverrides []func( *cobra.Command, - *serving.DeployAppRequest, + *serving.CreateAppRequest, ) func newCreate() *cobra.Command { cmd := &cobra.Command{} - var createReq serving.DeployAppRequest + var createReq serving.CreateAppRequest var createJson flags.JsonFlag + var createSkipWait bool + var createTimeout time.Duration + + cmd.Flags().BoolVar(&createSkipWait, "no-wait", createSkipWait, `do not wait to reach IDLE state`) + cmd.Flags().DurationVar(&createTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach IDLE state`) // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: output-only field + cmd.Flags().StringVar(&createReq.Description, "description", createReq.Description, `The description of the app.`) - cmd.Use = "create" - cmd.Short = `Create and deploy an application.` - cmd.Long = `Create and deploy an application. + cmd.Use = "create NAME" + cmd.Short = `Create an app.` + cmd.Long = `Create an app. 
- Creates and deploys an application.` + Creates a new app. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens. It must be unique within the workspace.` cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() @@ -78,15 +113,35 @@ func newCreate() *cobra.Command { if err != nil { return err } - } else { - return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + if !cmd.Flags().Changed("json") { + createReq.Name = args[0] } - response, err := w.Apps.Create(ctx, createReq) + wait, err := w.Apps.Create(ctx, createReq) if err != nil { return err } - return cmdio.Render(ctx, response) + if createSkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.App) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(createTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) } // Disable completions since they are not applicable. @@ -101,41 +156,35 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - -// start delete-app command +// start delete command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var deleteAppOverrides []func( +var deleteOverrides []func( *cobra.Command, *serving.DeleteAppRequest, ) -func newDeleteApp() *cobra.Command { +func newDelete() *cobra.Command { cmd := &cobra.Command{} - var deleteAppReq serving.DeleteAppRequest + var deleteReq serving.DeleteAppRequest // TODO: short flags - cmd.Use = "delete-app NAME" - cmd.Short = `Delete an application.` - cmd.Long = `Delete an application. + cmd.Use = "delete NAME" + cmd.Short = `Delete an app.` + cmd.Long = `Delete an app. - Delete an application definition + Deletes an app. Arguments: - NAME: The name of an application. This field is required.` + NAME: The name of the app.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -144,13 +193,13 @@ func newDeleteApp() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteAppReq.Name = args[0] + deleteReq.Name = args[0] - response, err := w.Apps.DeleteApp(ctx, deleteAppReq) + err = w.Apps.Delete(ctx, deleteReq) if err != nil { return err } - return cmdio.Render(ctx, response) + return nil } // Disable completions since they are not applicable. @@ -158,48 +207,156 @@ func newDeleteApp() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. 
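The regenerated create and deploy commands above follow the SDK's long-running-operation pattern: render the initial response when --no-wait is set, otherwise poll with a spinner until the terminal state or the --timeout. A condensed, self-contained restatement of that flow; the function name and parameters are illustrative, while the calls mirror the generated code in this diff:

package example

import (
	"context"
	"time"

	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

// createAndWait condenses the body of the generated `apps create` command.
func createAndWait(ctx context.Context, w *databricks.WorkspaceClient, req serving.CreateAppRequest, skipWait bool, timeout time.Duration) error {
	wait, err := w.Apps.Create(ctx, req)
	if err != nil {
		return err
	}
	if skipWait {
		// --no-wait: print the immediate response without polling for IDLE.
		return cmdio.Render(ctx, wait.Response)
	}
	spinner := cmdio.Spinner(ctx)
	app, err := wait.OnProgress(func(a *serving.App) {
		if a.Status != nil {
			spinner <- a.Status.Message // surface progress while waiting
		}
	}).GetWithTimeout(timeout)
	close(spinner)
	if err != nil {
		return err
	}
	return cmdio.Render(ctx, app)
}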
- for _, fn := range deleteAppOverrides { - fn(cmd, &deleteAppReq) + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteApp()) - }) -} - -// start get-app command +// start deploy command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getAppOverrides []func( +var deployOverrides []func( + *cobra.Command, + *serving.CreateAppDeploymentRequest, +) + +func newDeploy() *cobra.Command { + cmd := &cobra.Command{} + + var deployReq serving.CreateAppDeploymentRequest + var deployJson flags.JsonFlag + + var deploySkipWait bool + var deployTimeout time.Duration + + cmd.Flags().BoolVar(&deploySkipWait, "no-wait", deploySkipWait, `do not wait to reach SUCCEEDED state`) + cmd.Flags().DurationVar(&deployTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach SUCCEEDED state`) + // TODO: short flags + cmd.Flags().Var(&deployJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "deploy APP_NAME SOURCE_CODE_PATH MODE" + cmd.Short = `Create an app deployment.` + cmd.Long = `Create an app deployment. + + Creates an app deployment for the app with the supplied name. + + Arguments: + APP_NAME: The name of the app. + SOURCE_CODE_PATH: The workspace file system path of the source code used to create the app + deployment. This is different from + deployment_artifacts.source_code_path, which is the path used by the + deployed app. The former refers to the original source code location of + the app in the workspace during deployment creation, whereas the latter + provides a system generated stable snapshotted source code path used by + the deployment. + MODE: The mode of which the deployment will manage the source code.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only APP_NAME as positional arguments. Provide 'source_code_path', 'mode' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = deployJson.Unmarshal(&deployReq) + if err != nil { + return err + } + } + deployReq.AppName = args[0] + if !cmd.Flags().Changed("json") { + deployReq.SourceCodePath = args[1] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &deployReq.Mode) + if err != nil { + return fmt.Errorf("invalid MODE: %s", args[2]) + } + } + + wait, err := w.Apps.Deploy(ctx, deployReq) + if err != nil { + return err + } + if deploySkipWait { + return cmdio.Render(ctx, wait.Response) + } + spinner := cmdio.Spinner(ctx) + info, err := wait.OnProgress(func(i *serving.AppDeployment) { + if i.Status == nil { + return + } + status := i.Status.State + statusMessage := fmt.Sprintf("current status: %s", status) + if i.Status != nil { + statusMessage = i.Status.Message + } + spinner <- statusMessage + }).GetWithTimeout(deployTimeout) + close(spinner) + if err != nil { + return err + } + return cmdio.Render(ctx, info) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deployOverrides { + fn(cmd, &deployReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( *cobra.Command, *serving.GetAppRequest, ) -func newGetApp() *cobra.Command { +func newGet() *cobra.Command { cmd := &cobra.Command{} - var getAppReq serving.GetAppRequest + var getReq serving.GetAppRequest // TODO: short flags - cmd.Use = "get-app NAME" - cmd.Short = `Get definition for an application.` - cmd.Long = `Get definition for an application. + cmd.Use = "get NAME" + cmd.Short = `Get an app.` + cmd.Long = `Get an app. - Get an application definition + Retrieves information for the app with the supplied name. Arguments: - NAME: The name of an application. This field is required.` + NAME: The name of the app.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -208,9 +365,9 @@ func newGetApp() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getAppReq.Name = args[0] + getReq.Name = args[0] - response, err := w.Apps.GetApp(ctx, getAppReq) + response, err := w.Apps.Get(ctx, getReq) if err != nil { return err } @@ -222,50 +379,44 @@ func newGetApp() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getAppOverrides { - fn(cmd, &getAppReq) + for _, fn := range getOverrides { + fn(cmd, &getReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetApp()) - }) -} - -// start get-app-deployment-status command +// start get-deployment command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getAppDeploymentStatusOverrides []func( +var getDeploymentOverrides []func( *cobra.Command, - *serving.GetAppDeploymentStatusRequest, + *serving.GetAppDeploymentRequest, ) -func newGetAppDeploymentStatus() *cobra.Command { +func newGetDeployment() *cobra.Command { cmd := &cobra.Command{} - var getAppDeploymentStatusReq serving.GetAppDeploymentStatusRequest + var getDeploymentReq serving.GetAppDeploymentRequest // TODO: short flags - cmd.Flags().StringVar(&getAppDeploymentStatusReq.IncludeAppLog, "include-app-log", getAppDeploymentStatusReq.IncludeAppLog, `Boolean flag to include application logs.`) - - cmd.Use = "get-app-deployment-status DEPLOYMENT_ID" - cmd.Short = `Get deployment status for an application.` - cmd.Long = `Get deployment status for an application. + cmd.Use = "get-deployment APP_NAME DEPLOYMENT_ID" + cmd.Short = `Get an app deployment.` + cmd.Long = `Get an app deployment. - Get deployment status for an application + Retrieves information for the app deployment with the supplied name and + deployment id. Arguments: - DEPLOYMENT_ID: The deployment id for an application. This field is required.` + APP_NAME: The name of the app. 
+ DEPLOYMENT_ID: The unique id of the deployment.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(2) return check(cmd, args) } @@ -274,9 +425,10 @@ func newGetAppDeploymentStatus() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getAppDeploymentStatusReq.DeploymentId = args[0] + getDeploymentReq.AppName = args[0] + getDeploymentReq.DeploymentId = args[1] - response, err := w.Apps.GetAppDeploymentStatus(ctx, getAppDeploymentStatusReq) + response, err := w.Apps.GetDeployment(ctx, getDeploymentReq) if err != nil { return err } @@ -288,96 +440,42 @@ func newGetAppDeploymentStatus() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getAppDeploymentStatusOverrides { - fn(cmd, &getAppDeploymentStatusReq) + for _, fn := range getDeploymentOverrides { + fn(cmd, &getDeploymentReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAppDeploymentStatus()) - }) -} - -// start get-apps command +// start get-environment command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. -var getAppsOverrides []func( +var getEnvironmentOverrides []func( *cobra.Command, + *serving.GetAppEnvironmentRequest, ) -func newGetApps() *cobra.Command { +func newGetEnvironment() *cobra.Command { cmd := &cobra.Command{} - cmd.Use = "get-apps" - cmd.Short = `List all applications.` - cmd.Long = `List all applications. - - List all available applications` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - response, err := w.Apps.GetApps(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range getAppsOverrides { - fn(cmd) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetApps()) - }) -} - -// start get-events command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var getEventsOverrides []func( - *cobra.Command, - *serving.GetEventsRequest, -) - -func newGetEvents() *cobra.Command { - cmd := &cobra.Command{} - - var getEventsReq serving.GetEventsRequest + var getEnvironmentReq serving.GetAppEnvironmentRequest // TODO: short flags - cmd.Use = "get-events NAME" - cmd.Short = `Get deployment events for an application.` - cmd.Long = `Get deployment events for an application. + cmd.Use = "get-environment NAME" + cmd.Short = `Get app environment.` + cmd.Long = `Get app environment. - Get deployment events for an application + Retrieves app environment. Arguments: - NAME: The name of an application. 
This field is required.` + NAME: The name of the app.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -386,9 +484,9 @@ func newGetEvents() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getEventsReq.Name = args[0] + getEnvironmentReq.Name = args[0] - response, err := w.Apps.GetEvents(ctx, getEventsReq) + response, err := w.Apps.GetEnvironment(ctx, getEnvironmentReq) if err != nil { return err } @@ -400,17 +498,307 @@ func newGetEvents() *cobra.Command { cmd.ValidArgsFunction = cobra.NoFileCompletions // Apply optional overrides to this command. - for _, fn := range getEventsOverrides { - fn(cmd, &getEventsReq) + for _, fn := range getEnvironmentOverrides { + fn(cmd, &getEnvironmentReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEvents()) - }) +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *serving.ListAppsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq serving.ListAppsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Pagination token to go to the next page of apps.`) + + cmd.Use = "list" + cmd.Short = `List apps.` + cmd.Long = `List apps. + + Lists all apps in the workspace.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.Apps.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-deployments command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listDeploymentsOverrides []func( + *cobra.Command, + *serving.ListAppDeploymentsRequest, +) + +func newListDeployments() *cobra.Command { + cmd := &cobra.Command{} + + var listDeploymentsReq serving.ListAppDeploymentsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listDeploymentsReq.PageSize, "page-size", listDeploymentsReq.PageSize, `Upper bound for items returned.`) + cmd.Flags().StringVar(&listDeploymentsReq.PageToken, "page-token", listDeploymentsReq.PageToken, `Pagination token to go to the next page of apps.`) + + cmd.Use = "list-deployments APP_NAME" + cmd.Short = `List app deployments.` + cmd.Long = `List app deployments. + + Lists all app deployments for the app with the supplied name. 
+ + Arguments: + APP_NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listDeploymentsReq.AppName = args[0] + + response := w.Apps.ListDeployments(ctx, listDeploymentsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listDeploymentsOverrides { + fn(cmd, &listDeploymentsReq) + } + + return cmd +} + +// start start command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var startOverrides []func( + *cobra.Command, + *serving.StartAppRequest, +) + +func newStart() *cobra.Command { + cmd := &cobra.Command{} + + var startReq serving.StartAppRequest + + // TODO: short flags + + cmd.Use = "start NAME" + cmd.Short = `Start an app.` + cmd.Long = `Start an app. + + Start the last active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + startReq.Name = args[0] + + response, err := w.Apps.Start(ctx, startReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range startOverrides { + fn(cmd, &startReq) + } + + return cmd +} + +// start stop command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var stopOverrides []func( + *cobra.Command, + *serving.StopAppRequest, +) + +func newStop() *cobra.Command { + cmd := &cobra.Command{} + + var stopReq serving.StopAppRequest + + // TODO: short flags + + cmd.Use = "stop NAME" + cmd.Short = `Stop an app.` + cmd.Long = `Stop an app. + + Stops the active deployment of the app in the workspace. + + Arguments: + NAME: The name of the app.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + stopReq.Name = args[0] + + err = w.Apps.Stop(ctx, stopReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range stopOverrides { + fn(cmd, &stopReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *serving.UpdateAppRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq serving.UpdateAppRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Description, "description", updateReq.Description, `The description of the app.`) + + cmd.Use = "update NAME" + cmd.Short = `Update an app.` + cmd.Long = `Update an app. + + Updates the app with the supplied name. + + Arguments: + NAME: The name of the app. The name must contain only lowercase alphanumeric + characters and hyphens. It must be unique within the workspace.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.Name = args[0] + + response, err := w.Apps.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd } // end service Apps diff --git a/cmd/workspace/apps/overrides.go b/cmd/workspace/apps/overrides.go deleted file mode 100644 index e38e139b5..000000000 --- a/cmd/workspace/apps/overrides.go +++ /dev/null @@ -1,58 +0,0 @@ -package apps - -import ( - "fmt" - - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/serving" - "github.com/spf13/cobra" -) - -func createOverride(cmd *cobra.Command, deployReq *serving.DeployAppRequest) { - var manifestYaml flags.YamlFlag - var resourcesYaml flags.YamlFlag - createJson := cmd.Flag("json").Value.(*flags.JsonFlag) - - // TODO: short flags - cmd.Flags().Var(&manifestYaml, "manifest", `either inline YAML string or @path/to/manifest.yaml`) - cmd.Flags().Var(&resourcesYaml, "resources", `either inline YAML string or @path/to/resources.yaml`) - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - if cmd.Flags().Changed("json") { - err = createJson.Unmarshal(&deployReq) - if err != nil { - return err - } - } else if cmd.Flags().Changed("manifest") { - err = manifestYaml.Unmarshal(&deployReq.Manifest) - if err != nil { - return err - } - if cmd.Flags().Changed("resources") { - err = resourcesYaml.Unmarshal(&deployReq.Resources) - if err != nil { - return err - } - } - } else { - return fmt.Errorf("please provide command input in YAML format by specifying the --manifest flag or provide a json payload using the --json flag") - } - response, err := w.Apps.Create(ctx, *deployReq) - if err != nil { - return err - } - - return cmdio.Render(ctx, response) - } -} - -func init() { - createOverrides = append(createOverrides, createOverride) -} diff --git a/cmd/workspace/artifact-allowlists/artifact-allowlists.go b/cmd/workspace/artifact-allowlists/artifact-allowlists.go index e0b36ff92..fc25e3cb8 100755 --- a/cmd/workspace/artifact-allowlists/artifact-allowlists.go +++ b/cmd/workspace/artifact-allowlists/artifact-allowlists.go @@ -29,6 +29,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -66,7 +70,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -99,12 +103,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start update command // Slice with functions to override default command behavior. 
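Throughout these hunks the generated argument validators switch from cobra.ExactArgs to a root.ExactArgs wrapper from the CLI's own root package. The wrapper's body is not part of this diff; purely to illustrate the shape such a helper can take (a cobra.PositionalArgs that fails with a message tied to the command's usage line), a hypothetical sketch follows. It is not the cli's actual implementation.

// Hypothetical sketch only: NOT the cli's real root.ExactArgs, whose body is
// not shown in this diff. It illustrates one plausible reason to wrap cobra's
// stock validator: a friendlier, usage-aware error message.
package root

import (
	"fmt"

	"github.com/spf13/cobra"
)

// ExactArgs returns a cobra.PositionalArgs validator requiring exactly n
// positional arguments.
func ExactArgs(n int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) != n {
			return fmt.Errorf("%s: expected %d positional argument(s), got %d\nusage: %s",
				cmd.CommandPath(), n, len(args), cmd.UseLine())
		}
		return nil
	}
}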
@@ -137,7 +135,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -178,10 +176,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ArtifactAllowlists diff --git a/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go new file mode 100755 index 000000000..2385195bb --- /dev/null +++ b/cmd/workspace/automatic-cluster-update/automatic-cluster-update.go @@ -0,0 +1,157 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package automatic_cluster_update + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "automatic-cluster-update", + Short: `Controls whether automatic cluster update is enabled for the current workspace.`, + Long: `Controls whether automatic cluster update is enabled for the current + workspace. By default, it is turned off.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetAutomaticClusterUpdateSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetAutomaticClusterUpdateSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the automatic cluster update setting.` + cmd.Long = `Get the automatic cluster update setting. + + Gets the automatic cluster update setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.AutomaticClusterUpdate().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateAutomaticClusterUpdateSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateAutomaticClusterUpdateSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the automatic cluster update setting.` + cmd.Long = `Update the automatic cluster update setting. + + Updates the automatic cluster update setting for the workspace. A fresh etag + needs to be provided in PATCH requests (as part of the setting field). The + etag can be retrieved by making a GET request before the PATCH request. If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.AutomaticClusterUpdate().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service AutomaticClusterUpdate diff --git a/cmd/workspace/catalogs/catalogs.go b/cmd/workspace/catalogs/catalogs.go index 6ffe4a395..a17bb0072 100755 --- a/cmd/workspace/catalogs/catalogs.go +++ b/cmd/workspace/catalogs/catalogs.go @@ -34,6 +34,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -82,13 +89,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -126,12 +133,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
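The update command of the new automatic-cluster-update service above (and, later in this diff, compliance-security-profile) documents an etag-based optimistic-concurrency flow: read the setting to obtain a fresh etag, send it back in the PATCH, and retry with a refreshed etag if a concurrent writer causes a 409. A self-contained sketch of that loop follows; the client and setting types are stand-ins, while the real commands go through w.Settings.AutomaticClusterUpdate().Get and .Update as generated above.

// Illustrative sketch of the GET -> PATCH-with-etag -> retry-on-409 flow that
// the new settings update commands describe in their help text. Every type
// here is a stand-in, not the databricks-sdk-go API.
package main

import (
	"context"
	"errors"
	"fmt"
)

type setting struct {
	Etag    string
	Enabled bool
}

// errConflict models the 409 returned when the setting changed concurrently.
var errConflict = errors.New("conflict: etag is stale")

type settingsClient struct {
	stored setting
	writes int
}

func (c *settingsClient) Get(ctx context.Context) (setting, error) {
	return c.stored, nil
}

func (c *settingsClient) Update(ctx context.Context, s setting) (setting, error) {
	if s.Etag != c.stored.Etag {
		return setting{}, errConflict
	}
	c.writes++
	c.stored = setting{Etag: fmt.Sprintf("v%d", c.writes), Enabled: s.Enabled}
	return c.stored, nil
}

// updateWithRetry re-reads the setting to refresh the etag whenever the write
// is rejected because another writer got there first.
func updateWithRetry(ctx context.Context, c *settingsClient, enabled bool, maxAttempts int) (setting, error) {
	for attempt := 0; attempt < maxAttempts; attempt++ {
		current, err := c.Get(ctx)
		if err != nil {
			return setting{}, err
		}
		current.Enabled = enabled
		updated, err := c.Update(ctx, current) // PATCH carries the fresh etag
		if errors.Is(err, errConflict) {
			continue // setting changed under us; loop to fetch a fresh etag
		}
		return updated, err
	}
	return setting{}, fmt.Errorf("gave up after %d conflicting attempts", maxAttempts)
}

func main() {
	ctx := context.Background()
	c := &settingsClient{stored: setting{Etag: "v0"}}
	updated, err := updateWithRetry(ctx, c, true, 3)
	if err != nil {
		panic(err)
	}
	fmt.Printf("enabled=%v etag=%s\n", updated.Enabled, updated.Etag)
}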
@@ -163,7 +164,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -193,12 +194,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -215,6 +210,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get a catalog.` cmd.Long = `Get a catalog. @@ -229,7 +226,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -259,23 +256,26 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *catalog.ListCatalogsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListCatalogsRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include catalogs in the response for which the principal can only access selective metadata for.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of catalogs to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List catalogs.` cmd.Long = `List catalogs. @@ -288,15 +288,18 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Catalogs.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + + response := w.Catalogs.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -305,18 +308,12 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -356,7 +353,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -392,10 +389,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Catalogs diff --git a/cmd/workspace/catalogs/overrides.go b/cmd/workspace/catalogs/overrides.go index 6de7a7771..e2201dc15 100644 --- a/cmd/workspace/catalogs/overrides.go +++ b/cmd/workspace/catalogs/overrides.go @@ -2,12 +2,14 @@ package catalogs import ( "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/spf13/cobra" ) -func listOverride(listCmd *cobra.Command) { +func listOverride(listCmd *cobra.Command, listReq *catalog.ListCatalogsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Name"}} {{header "Type"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Name"}} {{header "Type"}} {{header "Comment"}} {{range .}}{{.Name|green}} {{blue "%s" .CatalogType}} {{.Comment}} {{end}}`) } diff --git a/cmd/workspace/clean-rooms/clean-rooms.go b/cmd/workspace/clean-rooms/clean-rooms.go index 99d732f90..9466c4b94 100755 --- a/cmd/workspace/clean-rooms/clean-rooms.go +++ b/cmd/workspace/clean-rooms/clean-rooms.go @@ -35,6 +35,13 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -105,12 +112,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -127,7 +128,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a clean room.` cmd.Long = `Delete a clean room. @@ -135,12 +136,12 @@ func newDelete() *cobra.Command { owner of the clean room. Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -149,7 +150,7 @@ func newDelete() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - deleteReq.NameArg = args[0] + deleteReq.Name = args[0] err = w.CleanRooms.Delete(ctx, deleteReq) if err != nil { @@ -170,12 +171,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -194,7 +189,7 @@ func newGet() *cobra.Command { cmd.Flags().BoolVar(&getReq.IncludeRemoteDetails, "include-remote-details", getReq.IncludeRemoteDetails, `Whether to include remote details (central) on the clean room.`) - cmd.Use = "get NAME_ARG" + cmd.Use = "get NAME" cmd.Short = `Get a clean room.` cmd.Long = `Get a clean room. 
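The cmd/workspace/catalogs/overrides.go hunk above shows the two changes every manually curated override now absorbs: the hook receives the request struct alongside the command, and the output template is split into a headerTemplate annotation plus a per-row template. A minimal sketch of a complete override file under that contract follows; the listOverride body mirrors the hunk above, while the init() registration is assumed from the generated file's own comment rather than shown in this diff.

// Sketch of cmd/workspace/catalogs/overrides.go under the new hook signature;
// the init() wiring below is assumed, not shown in this diff.
package catalogs

import (
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/databricks-sdk-go/service/catalog"
	"github.com/spf13/cobra"
)

// listOverride customizes the generated `list` command: the header row and the
// per-item row are now separate annotations consumed by the renderer.
func listOverride(listCmd *cobra.Command, listReq *catalog.ListCatalogsRequest) {
	listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
	{{header "Name"}}	{{header "Type"}}	{{header "Comment"}}`)
	listCmd.Annotations["template"] = cmdio.Heredoc(`
	{{range .}}{{.Name|green}}	{{blue "%s" .CatalogType}}	{{.Comment}}
	{{end}}`)
}

func init() {
	// listOverrides is declared in the generated catalogs.go; newList() calls
	// every function appended here after building the command.
	listOverrides = append(listOverrides, listOverride)
}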
@@ -202,12 +197,12 @@ func newGet() *cobra.Command { metastore admin or the owner of the clean room. Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -216,7 +211,7 @@ func newGet() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - getReq.NameArg = args[0] + getReq.Name = args[0] response, err := w.CleanRooms.Get(ctx, getReq) if err != nil { @@ -237,12 +232,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -273,7 +262,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -282,11 +271,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.CleanRooms.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.CleanRooms.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -301,12 +287,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -329,7 +309,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of clean room.`) - cmd.Use = "update NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a clean room.` cmd.Long = `Update a clean room. @@ -349,12 +329,12 @@ func newUpdate() *cobra.Command { Table removals through **update** do not require additional privileges. Arguments: - NAME_ARG: The name of the clean room.` + NAME: The name of the clean room.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -369,7 +349,7 @@ func newUpdate() *cobra.Command { return err } } - updateReq.NameArg = args[0] + updateReq.Name = args[0] response, err := w.CleanRooms.Update(ctx, updateReq) if err != nil { @@ -390,10 +370,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service CleanRooms diff --git a/cmd/workspace/cluster-policies/cluster-policies.go b/cmd/workspace/cluster-policies/cluster-policies.go index 339e87c4f..8129db477 100755 --- a/cmd/workspace/cluster-policies/cluster-policies.go +++ b/cmd/workspace/cluster-policies/cluster-policies.go @@ -26,10 +26,11 @@ func New() *cobra.Command { limit their use to specific users and groups. With cluster policies, you can: - Auto-install cluster libraries on the next - restart by listing them in the policy's "libraries" field. - Limit users to - creating clusters with the prescribed settings. 
- Simplify the user interface, - enabling more users to create clusters, by fixing and hiding some fields. - - Manage costs by setting limits on attributes that impact the hourly rate. + restart by listing them in the policy's "libraries" field (Public Preview). - + Limit users to creating clusters with the prescribed settings. - Simplify the + user interface, enabling more users to create clusters, by fixing and hiding + some fields. - Manage costs by setting limits on attributes that impact the + hourly rate. Cluster policy permissions limit which policies a user can select in the Policy drop-down when the user creates a cluster: - A user who has @@ -48,6 +49,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -95,13 +107,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -139,12 +151,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -177,7 +183,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id' in your JSON input") } @@ -236,12 +242,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -283,13 +283,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'policy_id', 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -330,12 +330,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -406,12 +400,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
@@ -482,12 +470,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -559,12 +541,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -593,7 +569,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -602,11 +578,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ClusterPolicies.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ClusterPolicies.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -621,12 +594,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -708,12 +675,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -795,10 +756,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service ClusterPolicies diff --git a/cmd/workspace/clusters/clusters.go b/cmd/workspace/clusters/clusters.go index d159ffd7b..abde1bb71 100755 --- a/cmd/workspace/clusters/clusters.go +++ b/cmd/workspace/clusters/clusters.go @@ -54,6 +54,28 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newChangeOwner()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newEvents()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListNodeTypes()) + cmd.AddCommand(newListZones()) + cmd.AddCommand(newPermanentDelete()) + cmd.AddCommand(newPin()) + cmd.AddCommand(newResize()) + cmd.AddCommand(newRestart()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSparkVersions()) + cmd.AddCommand(newStart()) + cmd.AddCommand(newUnpin()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -84,8 +106,9 @@ func newChangeOwner() *cobra.Command { cmd.Short = `Change cluster owner.` cmd.Long = `Change cluster owner. - Change the owner of the cluster. You must be an admin to perform this - operation. + Change the owner of the cluster. You must be an admin and the cluster must be + terminated to perform this operation. The service principal application ID can + be supplied as an argument to owner_username. 
Arguments: CLUSTER_ID: @@ -95,13 +118,13 @@ func newChangeOwner() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'owner_username' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -142,12 +165,6 @@ func newChangeOwner() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newChangeOwner()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -171,26 +188,19 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, ``) + cmd.Flags().BoolVar(&createReq.ApplyPolicyDefaultValues, "apply-policy-default-values", createReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`) // TODO: complex arg: autoscale cmd.Flags().IntVar(&createReq.AutoterminationMinutes, "autotermination-minutes", createReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes // TODO: complex arg: azure_attributes + // TODO: complex arg: clone_from // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&createReq.ClusterName, "cluster-name", createReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&createReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ - API, - JOB, - MODELS, - PIPELINE, - PIPELINE_MAINTENANCE, - SQL, - UI, -]`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&createReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, @@ -236,13 +246,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'spark_version' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -292,12 +302,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -337,7 +341,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -408,12 +412,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -437,26 +435,18 @@ func newEdit() *cobra.Command { // TODO: short flags cmd.Flags().Var(&editJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, ``) + cmd.Flags().BoolVar(&editReq.ApplyPolicyDefaultValues, "apply-policy-default-values", editReq.ApplyPolicyDefaultValues, `When set to true, fixed and default values from the policy will be used for fields that are omitted.`) // TODO: complex arg: autoscale cmd.Flags().IntVar(&editReq.AutoterminationMinutes, "autotermination-minutes", editReq.AutoterminationMinutes, `Automatically terminates the cluster after it is inactive for this time in minutes.`) // TODO: complex arg: aws_attributes // TODO: complex arg: azure_attributes // TODO: complex arg: cluster_log_conf cmd.Flags().StringVar(&editReq.ClusterName, "cluster-name", editReq.ClusterName, `Cluster name requested by the user.`) - cmd.Flags().Var(&editReq.ClusterSource, "cluster-source", `Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. Supported values: [ - API, - JOB, - MODELS, - PIPELINE, - PIPELINE_MAINTENANCE, - SQL, - UI, -]`) // TODO: map via StringToStringVar: custom_tags cmd.Flags().Var(&editReq.DataSecurityMode, "data-security-mode", `Data security mode decides what data governance model to use when accessing data from a cluster. Supported values: [ LEGACY_PASSTHROUGH, LEGACY_SINGLE_USER, + LEGACY_SINGLE_USER_STANDARD, LEGACY_TABLE_ACL, NONE, SINGLE_USER, @@ -507,13 +497,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id', 'spark_version' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -566,12 +556,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start events command // Slice with functions to override default command behavior. @@ -612,7 +596,7 @@ func newEvents() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'cluster_id' in your JSON input") } @@ -652,11 +636,8 @@ func newEvents() *cobra.Command { eventsReq.ClusterId = args[0] } - response, err := w.Clusters.EventsAll(ctx, eventsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Clusters.Events(ctx, eventsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -671,12 +652,6 @@ func newEvents() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEvents()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -753,12 +728,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -829,12 +798,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -906,12 +869,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -947,7 +904,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -956,11 +913,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Clusters.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Clusters.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -975,12 +929,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-node-types command // Slice with functions to override default command behavior. @@ -1024,12 +972,6 @@ func newListNodeTypes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListNodeTypes()) - }) -} - // start list-zones command // Slice with functions to override default command behavior. @@ -1073,12 +1015,6 @@ func newListZones() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListZones()) - }) -} - // start permanent-delete command // Slice with functions to override default command behavior. @@ -1115,7 +1051,7 @@ func newPermanentDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'cluster_id' in your JSON input") } @@ -1174,12 +1110,6 @@ func newPermanentDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPermanentDelete()) - }) -} - // start pin command // Slice with functions to override default command behavior. @@ -1213,7 +1143,7 @@ func newPin() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1272,12 +1202,6 @@ func newPin() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPin()) - }) -} - // start resize command // Slice with functions to override default command behavior. @@ -1318,7 +1242,7 @@ func newResize() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1389,12 +1313,6 @@ func newResize() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newResize()) - }) -} - // start restart command // Slice with functions to override default command behavior. @@ -1434,7 +1352,7 @@ func newRestart() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1505,12 +1423,6 @@ func newRestart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestart()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -1592,12 +1504,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start spark-versions command // Slice with functions to override default command behavior. @@ -1641,12 +1547,6 @@ func newSparkVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSparkVersions()) - }) -} - // start start command // Slice with functions to override default command behavior. @@ -1690,7 +1590,7 @@ func newStart() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1761,12 +1661,6 @@ func newStart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStart()) - }) -} - // start unpin command // Slice with functions to override default command behavior. 
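Several list-style commands in these hunks (catalogs list, connections list, clean-rooms list, cluster-policies list, clusters events and list) drop the eagerly materialized ListAll call rendered via cmdio.Render in favour of the paginated List iterator streamed through cmdio.RenderIterator. A self-contained sketch of the consumption pattern follows; the iterator interface here only mirrors the page-by-page shape and is not the SDK's actual listing type.

// Self-contained sketch of the lazy-listing pattern the hunks above switch to:
// instead of buffering every page up front, the command hands a page-by-page
// iterator to the renderer. Types here are illustrative, not the SDK's.
package main

import (
	"context"
	"fmt"
)

// iterator approximates the shape of a paginated listing: items are pulled
// one at a time and further pages are fetched lazily as needed.
type iterator[T any] interface {
	HasNext(ctx context.Context) bool
	Next(ctx context.Context) (T, error)
}

// sliceIterator is an in-memory stand-in for a real paginated API response.
type sliceIterator[T any] struct {
	items []T
	pos   int
}

func (s *sliceIterator[T]) HasNext(ctx context.Context) bool { return s.pos < len(s.items) }

func (s *sliceIterator[T]) Next(ctx context.Context) (T, error) {
	item := s.items[s.pos]
	s.pos++
	return item, nil
}

// renderIterator streams items as they arrive instead of collecting the whole
// result first, which is what switching to RenderIterator buys the CLI.
func renderIterator[T any](ctx context.Context, it iterator[T]) error {
	for it.HasNext(ctx) {
		item, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(item)
	}
	return nil
}

func main() {
	ctx := context.Background()
	it := &sliceIterator[string]{items: []string{"cluster-a", "cluster-b"}}
	if err := renderIterator[string](ctx, it); err != nil {
		panic(err)
	}
}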
@@ -1800,7 +1694,7 @@ func newUnpin() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input") } @@ -1859,12 +1753,6 @@ func newUnpin() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUnpin()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1946,10 +1834,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Clusters diff --git a/cmd/workspace/clusters/overrides.go b/cmd/workspace/clusters/overrides.go index ab32a4cd8..55976d406 100644 --- a/cmd/workspace/clusters/overrides.go +++ b/cmd/workspace/clusters/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, _ *compute.ListClustersRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "State"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "State"}} {{range .}}{{.ClusterId | green}} {{.ClusterName | cyan}} {{if eq .State "RUNNING"}}{{green "%s" .State}}{{else if eq .State "TERMINATED"}}{{red "%s" .State}}{{else}}{{blue "%s" .State}}{{end}} {{end}}`) } diff --git a/cmd/workspace/cmd.go b/cmd/workspace/cmd.go index f8e911d1f..7ad9389a8 100755 --- a/cmd/workspace/cmd.go +++ b/cmd/workspace/cmd.go @@ -11,6 +11,11 @@ import ( cluster_policies "github.com/databricks/cli/cmd/workspace/cluster-policies" clusters "github.com/databricks/cli/cmd/workspace/clusters" connections "github.com/databricks/cli/cmd/workspace/connections" + consumer_fulfillments "github.com/databricks/cli/cmd/workspace/consumer-fulfillments" + consumer_installations "github.com/databricks/cli/cmd/workspace/consumer-installations" + consumer_listings "github.com/databricks/cli/cmd/workspace/consumer-listings" + consumer_personalization_requests "github.com/databricks/cli/cmd/workspace/consumer-personalization-requests" + consumer_providers "github.com/databricks/cli/cmd/workspace/consumer-providers" credentials_manager "github.com/databricks/cli/cmd/workspace/credentials-manager" current_user "github.com/databricks/cli/cmd/workspace/current-user" dashboard_widgets "github.com/databricks/cli/cmd/workspace/dashboard-widgets" @@ -32,10 +37,20 @@ import ( metastores "github.com/databricks/cli/cmd/workspace/metastores" model_registry "github.com/databricks/cli/cmd/workspace/model-registry" model_versions "github.com/databricks/cli/cmd/workspace/model-versions" + online_tables "github.com/databricks/cli/cmd/workspace/online-tables" + permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration" permissions "github.com/databricks/cli/cmd/workspace/permissions" pipelines "github.com/databricks/cli/cmd/workspace/pipelines" policy_families "github.com/databricks/cli/cmd/workspace/policy-families" + provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters" + provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges" + provider_files "github.com/databricks/cli/cmd/workspace/provider-files" + provider_listings 
"github.com/databricks/cli/cmd/workspace/provider-listings" + provider_personalization_requests "github.com/databricks/cli/cmd/workspace/provider-personalization-requests" + provider_provider_analytics_dashboards "github.com/databricks/cli/cmd/workspace/provider-provider-analytics-dashboards" + provider_providers "github.com/databricks/cli/cmd/workspace/provider-providers" providers "github.com/databricks/cli/cmd/workspace/providers" + quality_monitors "github.com/databricks/cli/cmd/workspace/quality-monitors" queries "github.com/databricks/cli/cmd/workspace/queries" query_history "github.com/databricks/cli/cmd/workspace/query-history" query_visualizations "github.com/databricks/cli/cmd/workspace/query-visualizations" @@ -77,6 +92,11 @@ func All() []*cobra.Command { out = append(out, cluster_policies.New()) out = append(out, clusters.New()) out = append(out, connections.New()) + out = append(out, consumer_fulfillments.New()) + out = append(out, consumer_installations.New()) + out = append(out, consumer_listings.New()) + out = append(out, consumer_personalization_requests.New()) + out = append(out, consumer_providers.New()) out = append(out, credentials_manager.New()) out = append(out, current_user.New()) out = append(out, dashboard_widgets.New()) @@ -98,10 +118,20 @@ func All() []*cobra.Command { out = append(out, metastores.New()) out = append(out, model_registry.New()) out = append(out, model_versions.New()) + out = append(out, online_tables.New()) + out = append(out, permission_migration.New()) out = append(out, permissions.New()) out = append(out, pipelines.New()) out = append(out, policy_families.New()) + out = append(out, provider_exchange_filters.New()) + out = append(out, provider_exchanges.New()) + out = append(out, provider_files.New()) + out = append(out, provider_listings.New()) + out = append(out, provider_personalization_requests.New()) + out = append(out, provider_provider_analytics_dashboards.New()) + out = append(out, provider_providers.New()) out = append(out, providers.New()) + out = append(out, quality_monitors.New()) out = append(out, queries.New()) out = append(out, query_history.New()) out = append(out, query_visualizations.New()) diff --git a/cmd/workspace/compliance-security-profile/compliance-security-profile.go b/cmd/workspace/compliance-security-profile/compliance-security-profile.go new file mode 100755 index 000000000..a7b45901f --- /dev/null +++ b/cmd/workspace/compliance-security-profile/compliance-security-profile.go @@ -0,0 +1,160 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package compliance_security_profile + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "compliance-security-profile", + Short: `Controls whether to enable the compliance security profile for the current workspace.`, + Long: `Controls whether to enable the compliance security profile for the current + workspace. Enabling it on a workspace is permanent. By default, it is turned + off. 
+ + This settings can NOT be disabled once it is enabled.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetComplianceSecurityProfileSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetComplianceSecurityProfileSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the compliance security profile setting.` + cmd.Long = `Get the compliance security profile setting. + + Gets the compliance security profile setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.ComplianceSecurityProfile().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateComplianceSecurityProfileSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateComplianceSecurityProfileSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the compliance security profile setting.` + cmd.Long = `Update the compliance security profile setting. + + Updates the compliance security profile setting for the workspace. A fresh + etag needs to be provided in PATCH requests (as part of the setting field). + The etag can be retrieved by making a GET request before the PATCH + request. If the setting is updated concurrently, PATCH fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.ComplianceSecurityProfile().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. 
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ComplianceSecurityProfile diff --git a/cmd/workspace/connections/connections.go b/cmd/workspace/connections/connections.go index 5ad0c199b..f76420fbe 100755 --- a/cmd/workspace/connections/connections.go +++ b/cmd/workspace/connections/connections.go @@ -37,6 +37,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -112,12 +119,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -134,14 +135,14 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a connection.` cmd.Long = `Delete a connection. Deletes the connection that matches the supplied name. Arguments: - NAME_ARG: The name of the connection to be deleted.` + NAME: The name of the connection to be deleted.` cmd.Annotations = make(map[string]string) @@ -152,8 +153,8 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down." - names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." + names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err) @@ -167,7 +168,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the name of the connection to be deleted") } - deleteReq.NameArg = args[0] + deleteReq.Name = args[0] err = w.Connections.Delete(ctx, deleteReq) if err != nil { @@ -188,12 +189,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -210,14 +205,14 @@ func newGet() *cobra.Command { // TODO: short flags - cmd.Use = "get NAME_ARG" + cmd.Use = "get NAME" cmd.Short = `Get a connection.` cmd.Long = `Get a connection. Gets a connection from it's name. Arguments: - NAME_ARG: Name of the connection.` + NAME: Name of the connection.` cmd.Annotations = make(map[string]string) @@ -228,8 +223,8 @@ func newGet() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No NAME_ARG argument specified. Loading names for Connections drop-down." - names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx) + promptSpinner <- "No NAME argument specified. Loading names for Connections drop-down." 
+ names, err := w.Connections.ConnectionInfoNameToFullNameMap(ctx, catalog.ListConnectionsRequest{}) close(promptSpinner) if err != nil { return fmt.Errorf("failed to load names for Connections drop-down. Please manually specify required arguments. Original error: %w", err) @@ -243,7 +238,7 @@ func newGet() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have name of the connection") } - getReq.NameArg = args[0] + getReq.Name = args[0] response, err := w.Connections.Get(ctx, getReq) if err != nil { @@ -264,23 +259,25 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var listOverrides []func( *cobra.Command, + *catalog.ListConnectionsRequest, ) func newList() *cobra.Command { cmd := &cobra.Command{} + var listReq catalog.ListConnectionsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of connections to return.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) + cmd.Use = "list" cmd.Short = `List connections.` cmd.Long = `List connections. @@ -289,15 +286,18 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Connections.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + + response := w.Connections.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -306,18 +306,12 @@ func newList() *cobra.Command { // Apply optional overrides to this command. for _, fn := range listOverrides { - fn(cmd) + fn(cmd, &listReq) } return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -336,23 +330,22 @@ func newUpdate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of the connection.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the connection.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of the connection.`) - cmd.Use = "update NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a connection.` cmd.Long = `Update a connection. Updates the connection that matches the supplied name. 
Arguments: - NAME_ARG: Name of the connection.` + NAME: Name of the connection.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -369,7 +362,7 @@ func newUpdate() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - updateReq.NameArg = args[0] + updateReq.Name = args[0] response, err := w.Connections.Update(ctx, updateReq) if err != nil { @@ -390,10 +383,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Connections diff --git a/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go new file mode 100755 index 000000000..6f3ba4b42 --- /dev/null +++ b/cmd/workspace/consumer-fulfillments/consumer-fulfillments.go @@ -0,0 +1,156 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_fulfillments + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-fulfillments", + Short: `Fulfillments are entities that allow consumers to preview installations.`, + Long: `Fulfillments are entities that allow consumers to preview installations.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingContentMetadataRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingContentMetadataRequest + + // TODO: short flags + + cmd.Flags().IntVar(&getReq.PageSize, "page-size", getReq.PageSize, ``) + cmd.Flags().StringVar(&getReq.PageToken, "page-token", getReq.PageToken, ``) + + cmd.Use = "get LISTING_ID" + cmd.Short = `Get listing content metadata.` + cmd.Long = `Get listing content metadata. + + Get a high level preview of the metadata of listing installable content.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.ListingId = args[0] + + response := w.ConsumerFulfillments.Get(ctx, getReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. 
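Both the regenerated `connections list` command above and the new consumer-fulfillments commands switch from materializing full result sets to streaming them through `cmdio.RenderIterator`, with paging driven by the new request structs (hence the `--max-results`/`--page-token` style flags). A rough sketch of the same connections listing made directly against the Go SDK, assuming the SDK's usual `HasNext`/`Next` iterator methods and the `ConnectionInfo.Name` field:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// List now takes a request struct, which is what the generated command's
	// new paging flags populate.
	it := w.Connections.List(ctx, catalog.ListConnectionsRequest{MaxResults: 50})
	for it.HasNext(ctx) {
		c, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(c.Name)
	}
}
```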
+ // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListFulfillmentsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListFulfillmentsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list LISTING_ID" + cmd.Short = `List all listing fulfillments.` + cmd.Long = `List all listing fulfillments. + + Get all listings fulfillments associated with a listing. A _fulfillment_ is a + potential installation. Standard installations contain metadata about the + attached share or git repo. Only one of these fields will be present. + Personalized installations contain metadata about the attached share or git + repo, as well as the Delta Sharing recipient type.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.ListingId = args[0] + + response := w.ConsumerFulfillments.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerFulfillments diff --git a/cmd/workspace/consumer-installations/consumer-installations.go b/cmd/workspace/consumer-installations/consumer-installations.go new file mode 100755 index 000000000..d176e5b39 --- /dev/null +++ b/cmd/workspace/consumer-installations/consumer-installations.go @@ -0,0 +1,354 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_installations + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-installations", + Short: `Installations are entities that allow consumers to interact with Databricks Marketplace listings.`, + Long: `Installations are entities that allow consumers to interact with Databricks + Marketplace listings.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. 
+ Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListListingInstallations()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateInstallationRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateInstallationRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + // TODO: complex arg: accepted_consumer_terms + cmd.Flags().StringVar(&createReq.CatalogName, "catalog-name", createReq.CatalogName, ``) + cmd.Flags().Var(&createReq.RecipientType, "recipient-type", `. Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN]`) + // TODO: complex arg: repo_detail + cmd.Flags().StringVar(&createReq.ShareName, "share-name", createReq.ShareName, ``) + + cmd.Use = "create LISTING_ID" + cmd.Short = `Install from a listing.` + cmd.Long = `Install from a listing. + + Install payload associated with a Databricks Marketplace listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + createReq.ListingId = args[0] + + response, err := w.ConsumerInstallations.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteInstallationRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteInstallationRequest + + // TODO: short flags + + cmd.Use = "delete LISTING_ID INSTALLATION_ID" + cmd.Short = `Uninstall from a listing.` + cmd.Long = `Uninstall from a listing. 
+ + Uninstall an installation associated with a Databricks Marketplace listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.ListingId = args[0] + deleteReq.InstallationId = args[1] + + err = w.ConsumerInstallations.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllInstallationsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllInstallationsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List all installations.` + cmd.Long = `List all installations. + + List all installations across all listings.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerInstallations.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-listing-installations command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listListingInstallationsOverrides []func( + *cobra.Command, + *marketplace.ListInstallationsRequest, +) + +func newListListingInstallations() *cobra.Command { + cmd := &cobra.Command{} + + var listListingInstallationsReq marketplace.ListInstallationsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listListingInstallationsReq.PageSize, "page-size", listListingInstallationsReq.PageSize, ``) + cmd.Flags().StringVar(&listListingInstallationsReq.PageToken, "page-token", listListingInstallationsReq.PageToken, ``) + + cmd.Use = "list-listing-installations LISTING_ID" + cmd.Short = `List installations for a listing.` + cmd.Long = `List installations for a listing. 
+ + List all installations for a particular listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listListingInstallationsReq.ListingId = args[0] + + response := w.ConsumerInstallations.ListListingInstallations(ctx, listListingInstallationsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listListingInstallationsOverrides { + fn(cmd, &listListingInstallationsReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateInstallationRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateInstallationRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().BoolVar(&updateReq.RotateToken, "rotate-token", updateReq.RotateToken, ``) + + cmd.Use = "update LISTING_ID INSTALLATION_ID" + cmd.Short = `Update an installation.` + cmd.Long = `Update an installation. + + This is a update API that will update the part of the fields defined in the + installation table as well as interact with external services according to the + fields not included in the installation table 1. the token will be rotate if + the rotateToken flag is true 2. the token will be forcibly rotate if the + rotateToken flag is true and the tokenInfo field is empty` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.ListingId = args[0] + updateReq.InstallationId = args[1] + + response, err := w.ConsumerInstallations.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ConsumerInstallations diff --git a/cmd/workspace/consumer-listings/consumer-listings.go b/cmd/workspace/consumer-listings/consumer-listings.go new file mode 100755 index 000000000..18f3fb39e --- /dev/null +++ b/cmd/workspace/consumer-listings/consumer-listings.go @@ -0,0 +1,313 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
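The installation commands above rely on the CLI's shared `--json` input path: `flags.JsonFlag` accepts either an inline JSON document or `@path/to/file.json`, and the generated `RunE` unmarshals it into the SDK request struct. A small sketch of that flow outside of cobra; the `rotate_token` field name is an assumption based on the SDK's usual snake_case JSON tags:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/flags"
	"github.com/databricks/databricks-sdk-go/service/marketplace"
)

func main() {
	var body flags.JsonFlag

	// Set is what pflag calls when --json is parsed; "@file.json" would load
	// the body from disk instead of taking it inline.
	if err := body.Set(`{"rotate_token": true}`); err != nil {
		panic(err)
	}

	var req marketplace.UpdateInstallationRequest
	if err := body.Unmarshal(&req); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", req)
}
```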
+ +package consumer_listings + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-listings", + Short: `Listings are the core entities in the Marketplace.`, + Long: `Listings are the core entities in the Marketplace. They represent the products + that are available for consumption.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newBatchGet()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSearch()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetListingsRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetListingsRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of listings.` + cmd.Long = `Get one batch of listings. One may specify up to 50 IDs per request. + + Batch get a published listing in the Databricks Marketplace that the consumer + has access to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerListings.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get listing.` + cmd.Long = `Get listing. + + Get a published listing in the Databricks Marketplace that the consumer has + access to.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. 
Loading names for Consumer Listings drop-down." + names, err := w.ConsumerListings.ListingSummaryNameToIdMap(ctx, marketplace.ListListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ConsumerListings.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListListingsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListListingsRequest + + // TODO: short flags + + // TODO: array: assets + // TODO: array: categories + cmd.Flags().BoolVar(&listReq.IsAscending, "is-ascending", listReq.IsAscending, ``) + cmd.Flags().BoolVar(&listReq.IsFree, "is-free", listReq.IsFree, `Filters each listing based on if it is free.`) + cmd.Flags().BoolVar(&listReq.IsPrivateExchange, "is-private-exchange", listReq.IsPrivateExchange, `Filters each listing based on if it is a private exchange.`) + cmd.Flags().BoolVar(&listReq.IsStaffPick, "is-staff-pick", listReq.IsStaffPick, `Filters each listing based on whether it is a staff pick.`) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + // TODO: array: provider_ids + cmd.Flags().Var(&listReq.SortBy, "sort-by", `Criteria for sorting the resulting set of listings. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) + // TODO: array: tags + + cmd.Use = "list" + cmd.Short = `List listings.` + cmd.Long = `List listings. + + List all published listings in the Databricks Marketplace that the consumer + has access to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerListings.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start search command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var searchOverrides []func( + *cobra.Command, + *marketplace.SearchListingsRequest, +) + +func newSearch() *cobra.Command { + cmd := &cobra.Command{} + + var searchReq marketplace.SearchListingsRequest + + // TODO: short flags + + // TODO: array: assets + // TODO: array: categories + cmd.Flags().BoolVar(&searchReq.IsAscending, "is-ascending", searchReq.IsAscending, ``) + cmd.Flags().BoolVar(&searchReq.IsFree, "is-free", searchReq.IsFree, ``) + cmd.Flags().BoolVar(&searchReq.IsPrivateExchange, "is-private-exchange", searchReq.IsPrivateExchange, ``) + cmd.Flags().IntVar(&searchReq.PageSize, "page-size", searchReq.PageSize, ``) + cmd.Flags().StringVar(&searchReq.PageToken, "page-token", searchReq.PageToken, ``) + // TODO: array: provider_ids + cmd.Flags().Var(&searchReq.SortBy, "sort-by", `. Supported values: [SORT_BY_DATE, SORT_BY_RELEVANCE, SORT_BY_TITLE, SORT_BY_UNSPECIFIED]`) + + cmd.Use = "search QUERY" + cmd.Short = `Search listings.` + cmd.Long = `Search listings. + + Search published listings in the Databricks Marketplace that the consumer has + access to. This query supports a variety of different search parameters and + performs fuzzy matching. + + Arguments: + QUERY: Fuzzy matches query` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No QUERY argument specified. Loading names for Consumer Listings drop-down." + names, err := w.ConsumerListings.ListingSummaryNameToIdMap(ctx, marketplace.ListListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Fuzzy matches query") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have fuzzy matches query") + } + searchReq.Query = args[0] + + response := w.ConsumerListings.Search(ctx, searchReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range searchOverrides { + fn(cmd, &searchReq) + } + + return cmd +} + +// end service ConsumerListings diff --git a/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go new file mode 100755 index 000000000..c55ca4ee1 --- /dev/null +++ b/cmd/workspace/consumer-personalization-requests/consumer-personalization-requests.go @@ -0,0 +1,228 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package consumer_personalization_requests + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-personalization-requests", + Short: `Personalization Requests allow customers to interact with the individualized Marketplace listing flow.`, + Long: `Personalization Requests allow customers to interact with the individualized + Marketplace listing flow.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreatePersonalizationRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreatePersonalizationRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, ``) + cmd.Flags().StringVar(&createReq.Company, "company", createReq.Company, ``) + cmd.Flags().StringVar(&createReq.FirstName, "first-name", createReq.FirstName, ``) + cmd.Flags().BoolVar(&createReq.IsFromLighthouse, "is-from-lighthouse", createReq.IsFromLighthouse, ``) + cmd.Flags().StringVar(&createReq.LastName, "last-name", createReq.LastName, ``) + cmd.Flags().Var(&createReq.RecipientType, "recipient-type", `. Supported values: [DELTA_SHARING_RECIPIENT_TYPE_DATABRICKS, DELTA_SHARING_RECIPIENT_TYPE_OPEN]`) + + cmd.Use = "create LISTING_ID" + cmd.Short = `Create a personalization request.` + cmd.Long = `Create a personalization request. + + Create a personalization request for a listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createReq.ListingId = args[0] + + response, err := w.ConsumerPersonalizationRequests.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *marketplace.GetPersonalizationRequestRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetPersonalizationRequestRequest + + // TODO: short flags + + cmd.Use = "get LISTING_ID" + cmd.Short = `Get the personalization request for a listing.` + cmd.Long = `Get the personalization request for a listing. + + Get the personalization request for a listing. Each consumer can make at + *most* one personalization request for a listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.ListingId = args[0] + + response, err := w.ConsumerPersonalizationRequests.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllPersonalizationRequestsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllPersonalizationRequestsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List all personalization requests.` + cmd.Long = `List all personalization requests. + + List personalization requests for a consumer across all listings.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerPersonalizationRequests.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerPersonalizationRequests diff --git a/cmd/workspace/consumer-providers/consumer-providers.go b/cmd/workspace/consumer-providers/consumer-providers.go new file mode 100755 index 000000000..579a89516 --- /dev/null +++ b/cmd/workspace/consumer-providers/consumer-providers.go @@ -0,0 +1,224 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
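A minimal sketch of the SDK call behind the `consumer-personalization-requests get LISTING_ID` command above; the listing ID value is hypothetical, and everything else mirrors the types and methods used in the generated code:

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/marketplace"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Each consumer can make at most one personalization request per listing,
	// so the listing ID alone addresses it.
	resp, err := w.ConsumerPersonalizationRequests.Get(ctx, marketplace.GetPersonalizationRequestRequest{
		ListingId: "hypothetical-listing-id",
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", resp)
}
```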
+ +package consumer_providers + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "consumer-providers", + Short: `Providers are the entities that publish listings to the Marketplace.`, + Long: `Providers are the entities that publish listings to the Marketplace.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newBatchGet()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start batch-get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var batchGetOverrides []func( + *cobra.Command, + *marketplace.BatchGetProvidersRequest, +) + +func newBatchGet() *cobra.Command { + cmd := &cobra.Command{} + + var batchGetReq marketplace.BatchGetProvidersRequest + + // TODO: short flags + + // TODO: array: ids + + cmd.Use = "batch-get" + cmd.Short = `Get one batch of providers.` + cmd.Long = `Get one batch of providers. One may specify up to 50 IDs per request. + + Batch get a provider in the Databricks Marketplace with at least one visible + listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.ConsumerProviders.BatchGet(ctx, batchGetReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range batchGetOverrides { + fn(cmd, &batchGetReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetProviderRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetProviderRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a provider.` + cmd.Long = `Get a provider. + + Get a provider in the Databricks Marketplace with at least one visible + listing.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Consumer Providers drop-down." 
+ names, err := w.ConsumerProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Consumer Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ConsumerProviders.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListProvidersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListProvidersRequest + + // TODO: short flags + + cmd.Flags().BoolVar(&listReq.IsFeatured, "is-featured", listReq.IsFeatured, ``) + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List providers.` + cmd.Long = `List providers. + + List all providers in the Databricks Marketplace with at least one visible + listing.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ConsumerProviders.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ConsumerProviders diff --git a/cmd/workspace/credentials-manager/credentials-manager.go b/cmd/workspace/credentials-manager/credentials-manager.go index 30b33f7b3..5a40232b3 100755 --- a/cmd/workspace/credentials-manager/credentials-manager.go +++ b/cmd/workspace/credentials-manager/credentials-manager.go @@ -31,6 +31,9 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newExchangeToken()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -61,8 +64,8 @@ func newExchangeToken() *cobra.Command { cmd.Short = `Exchange token.` cmd.Long = `Exchange token. - Exchange tokens with an Identity Provider to get a new access token. It - allowes specifying scopes to determine token permissions.` + Exchange tokens with an Identity Provider to get a new access token. 
It allows + specifying scopes to determine token permissions.` cmd.Annotations = make(map[string]string) @@ -99,10 +102,4 @@ func newExchangeToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExchangeToken()) - }) -} - // end service CredentialsManager diff --git a/cmd/workspace/current-user/current-user.go b/cmd/workspace/current-user/current-user.go index cb18e71d2..a42c3ead5 100755 --- a/cmd/workspace/current-user/current-user.go +++ b/cmd/workspace/current-user/current-user.go @@ -24,6 +24,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newMe()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -74,10 +77,4 @@ func newMe() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newMe()) - }) -} - // end service CurrentUser diff --git a/cmd/workspace/dashboard-widgets/dashboard-widgets.go b/cmd/workspace/dashboard-widgets/dashboard-widgets.go index 43a972e03..02b13739a 100755 --- a/cmd/workspace/dashboard-widgets/dashboard-widgets.go +++ b/cmd/workspace/dashboard-widgets/dashboard-widgets.go @@ -32,6 +32,11 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +102,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -129,7 +128,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -159,12 +158,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -193,7 +186,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -231,10 +224,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service DashboardWidgets diff --git a/cmd/workspace/dashboards/dashboards.go b/cmd/workspace/dashboards/dashboards.go index cd3227af9..fcab0aa2a 100755 --- a/cmd/workspace/dashboards/dashboards.go +++ b/cmd/workspace/dashboards/dashboards.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRestore()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
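Alongside the per-command hooks, each service command built here still runs the service-level `cmdOverrides` functions right after its subcommands are registered, so manually curated files can keep adjusting the parent command even though the `init()`-based registration of subcommands is gone. A sketch, assuming the generated dashboards package is named `dashboards`; the alias is purely illustrative:

```go
// override.go — hypothetical, manually curated file next to the generated
// dashboards command.
package dashboards

import "github.com/spf13/cobra"

func init() {
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		// Runs inside New() after all generated subcommands are attached.
		cmd.Aliases = []string{"dashboard"}
	})
}
```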
for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +105,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -122,7 +124,7 @@ func newDelete() *cobra.Command { cmd.Use = "delete DASHBOARD_ID" cmd.Short = `Remove a dashboard.` cmd.Long = `Remove a dashboard. - + Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared.` @@ -171,12 +173,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -196,7 +192,7 @@ func newGet() *cobra.Command { cmd.Use = "get DASHBOARD_ID" cmd.Short = `Retrieve a definition.` cmd.Long = `Retrieve a definition. - + Returns a JSON representation of a dashboard object, including its visualization and query objects.` @@ -245,12 +241,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -275,13 +265,16 @@ func newList() *cobra.Command { cmd.Use = "list" cmd.Short = `Get dashboard objects.` cmd.Long = `Get dashboard objects. - - Fetch a paginated list of dashboard objects.` + + Fetch a paginated list of dashboard objects. + + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -290,11 +283,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Dashboards.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Dashboards.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -309,12 +299,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start restore command // Slice with functions to override default command behavior. @@ -334,7 +318,7 @@ func newRestore() *cobra.Command { cmd.Use = "restore DASHBOARD_ID" cmd.Short = `Restore a dashboard.` cmd.Long = `Restore a dashboard. - + A restored dashboard appears in list views and searches and can be shared.` cmd.Annotations = make(map[string]string) @@ -382,10 +366,86 @@ func newRestore() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestore()) - }) +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *sql.DashboardEditContent, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq sql.DashboardEditContent + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this dashboard that appears in list views and at the top of the dashboard page.`) + cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) + // TODO: array: tags + + cmd.Use = "update DASHBOARD_ID" + cmd.Short = `Change a dashboard definition.` + cmd.Long = `Change a dashboard definition. + + Modify this dashboard definition. This operation only affects attributes of + the dashboard object. It does not add, modify, or remove widgets. + + **Note**: You cannot undo this operation.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No DASHBOARD_ID argument specified. Loading names for Dashboards drop-down." + names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Dashboards drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + updateReq.DashboardId = args[0] + + response, err := w.Dashboards.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd } // end service Dashboards diff --git a/cmd/workspace/dashboards/overrides.go b/cmd/workspace/dashboards/overrides.go index 709e657f8..6a26ebbfb 100644 --- a/cmd/workspace/dashboards/overrides.go +++ b/cmd/workspace/dashboards/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, _ *sql.ListDashboardsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{range .}}{{.Id|green}} {{.Name}} {{end}}`) } diff --git a/cmd/workspace/data-sources/data-sources.go b/cmd/workspace/data-sources/data-sources.go index 969399f42..f310fe50a 100755 --- a/cmd/workspace/data-sources/data-sources.go +++ b/cmd/workspace/data-sources/data-sources.go @@ -25,13 +25,21 @@ func New() *cobra.Command { This API does not support searches. It returns the full list of SQL warehouses in your workspace. We advise you to use any text editor, REST client, or grep to search the response from this API for the name of your SQL warehouse - as it appears in Databricks SQL.`, + as it appears in Databricks SQL. 
+ + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -57,7 +65,12 @@ func newList() *cobra.Command { Retrieves a full list of SQL warehouses available in this workspace. All fields that appear in this API response are enumerated for clarity. However, - you need only a SQL warehouse's id to create new queries against it.` + you need only a SQL warehouse's id to create new queries against it. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -84,10 +97,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service DataSources diff --git a/cmd/workspace/default-namespace/default-namespace.go b/cmd/workspace/default-namespace/default-namespace.go new file mode 100755 index 000000000..b15907bec --- /dev/null +++ b/cmd/workspace/default-namespace/default-namespace.go @@ -0,0 +1,229 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package default_namespace + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "default-namespace", + Short: `The default namespace setting API allows users to configure the default namespace for a Databricks workspace.`, + Long: `The default namespace setting API allows users to configure the default + namespace for a Databricks workspace. + + Through this API, users can retrieve, set, or modify the default namespace + used when queries do not reference a fully qualified three-level name. For + example, if you use the API to set 'retail_prod' as the default catalog, then + a query 'SELECT * FROM myTable' would reference the object + 'retail_prod.default.myTable' (the schema 'default' is always assumed). + + This setting requires a restart of clusters and SQL warehouses to take effect. + Additionally, the default namespace only applies when using Unity + Catalog-enabled compute.`, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var deleteOverrides []func( + *cobra.Command, + *settings.DeleteDefaultNamespaceSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteDefaultNamespaceSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the default namespace setting.` + cmd.Long = `Delete the default namespace setting. + + Deletes the default namespace setting for the workspace. A fresh etag needs to + be provided in DELETE requests (as a query parameter). The etag can be + retrieved by making a GET request before the DELETE request. If the + setting is updated/deleted concurrently, DELETE fails with 409 and the + request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DefaultNamespace().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetDefaultNamespaceSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetDefaultNamespaceSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the default namespace setting.` + cmd.Long = `Get the default namespace setting. + + Gets the default namespace setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.DefaultNamespace().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *settings.UpdateDefaultNamespaceSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateDefaultNamespaceSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the default namespace setting.` + cmd.Long = `Update the default namespace setting. + + Updates the default namespace setting for the workspace. A fresh etag needs to + be provided in PATCH requests (as part of the setting field). The etag can + be retrieved by making a GET request before the PATCH request. Note that + if the setting does not exist, GET returns a NOT_FOUND error and the etag is + present in the error response, which should be set in the PATCH request. If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.DefaultNamespace().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service DefaultNamespace diff --git a/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go new file mode 100755 index 000000000..a8acc5cd1 --- /dev/null +++ b/cmd/workspace/enhanced-security-monitoring/enhanced-security-monitoring.go @@ -0,0 +1,162 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package enhanced_security_monitoring + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "enhanced-security-monitoring", + Short: `Controls whether enhanced security monitoring is enabled for the current workspace.`, + Long: `Controls whether enhanced security monitoring is enabled for the current + workspace. If the compliance security profile is enabled, this is + automatically enabled. By default, it is disabled. However, if the compliance + security profile is enabled, this is automatically enabled. 
+ + If the compliance security profile is disabled, you can enable or disable this + setting and it is not permanent.`, + } + + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *settings.GetEnhancedSecurityMonitoringSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetEnhancedSecurityMonitoringSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the enhanced security monitoring setting.` + cmd.Long = `Get the enhanced security monitoring setting. + + Gets the enhanced security monitoring setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.EnhancedSecurityMonitoring().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateEnhancedSecurityMonitoringSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateEnhancedSecurityMonitoringSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the enhanced security monitoring setting.` + cmd.Long = `Update the enhanced security monitoring setting. + + Updates the enhanced security monitoring setting for the workspace. A fresh + etag needs to be provided in PATCH requests (as part of the setting field). + The etag can be retrieved by making a GET request before the PATCH + request. 
If the setting is updated concurrently, PATCH fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.EnhancedSecurityMonitoring().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service EnhancedSecurityMonitoring diff --git a/cmd/workspace/experiments/experiments.go b/cmd/workspace/experiments/experiments.go index 79828714c..e1e00380b 100755 --- a/cmd/workspace/experiments/experiments.go +++ b/cmd/workspace/experiments/experiments.go @@ -35,6 +35,38 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateExperiment()) + cmd.AddCommand(newCreateRun()) + cmd.AddCommand(newDeleteExperiment()) + cmd.AddCommand(newDeleteRun()) + cmd.AddCommand(newDeleteRuns()) + cmd.AddCommand(newDeleteTag()) + cmd.AddCommand(newGetByName()) + cmd.AddCommand(newGetExperiment()) + cmd.AddCommand(newGetHistory()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetRun()) + cmd.AddCommand(newListArtifacts()) + cmd.AddCommand(newListExperiments()) + cmd.AddCommand(newLogBatch()) + cmd.AddCommand(newLogInputs()) + cmd.AddCommand(newLogMetric()) + cmd.AddCommand(newLogModel()) + cmd.AddCommand(newLogParam()) + cmd.AddCommand(newRestoreExperiment()) + cmd.AddCommand(newRestoreRun()) + cmd.AddCommand(newRestoreRuns()) + cmd.AddCommand(newSearchExperiments()) + cmd.AddCommand(newSearchRuns()) + cmd.AddCommand(newSetExperimentTag()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSetTag()) + cmd.AddCommand(newUpdateExperiment()) + cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateRun()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -82,13 +114,13 @@ func newCreateExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -126,12 +158,6 @@ func newCreateExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateExperiment()) - }) -} - // start create-run command // Slice with functions to override default command behavior. 
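
The `default-namespace` and `enhanced-security-monitoring` commands added above all follow the same etag-based optimistic-concurrency contract described in their help text: read the setting to obtain a fresh etag, send it with the DELETE/PATCH, and on a 409 retry with the etag carried in the error response. A rough sketch of the delete flow, using only the SDK calls and request types that appear in this diff; the `Etag` field on the value returned by `Get` is an assumption based on the documented contract, not something shown in these hunks.

```go
package example // illustrative only; not part of this diff

import (
	"context"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

// resetDefaultNamespace sketches the GET-then-DELETE etag flow described in
// the default-namespace help text above. Get/Delete and the request types
// appear in this diff; the Etag field on the returned setting is assumed.
func resetDefaultNamespace(ctx context.Context, w *databricks.WorkspaceClient) error {
	// Read first to obtain a fresh etag (documented contract above).
	current, err := w.Settings.DefaultNamespace().Get(ctx, settings.GetDefaultNamespaceSettingRequest{})
	if err != nil {
		return err
	}
	// A stale etag (or a concurrent update) yields a 409 whose error body
	// carries a fresh etag; the delete should then be retried with it.
	_, err = w.Settings.DefaultNamespace().Delete(ctx, settings.DeleteDefaultNamespaceSettingRequest{
		Etag: current.Etag, // assumed field, per the documented contract
	})
	return err
}
```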
@@ -167,7 +193,7 @@ func newCreateRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -202,12 +228,6 @@ func newCreateRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateRun()) - }) -} - // start delete-experiment command // Slice with functions to override default command behavior. @@ -241,13 +261,13 @@ func newDeleteExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -285,12 +305,6 @@ func newDeleteExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteExperiment()) - }) -} - // start delete-run command // Slice with functions to override default command behavior. @@ -322,13 +336,13 @@ func newDeleteRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -366,12 +380,6 @@ func newDeleteRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRun()) - }) -} - // start delete-runs command // Slice with functions to override default command behavior. @@ -397,7 +405,9 @@ func newDeleteRuns() *cobra.Command { cmd.Long = `Delete runs by creation time. Bulk delete runs in an experiment that were created prior to or at the - specified timestamp. Deletes at most max_runs per request. + specified timestamp. Deletes at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. Arguments: EXPERIMENT_ID: The ID of the experiment containing the runs to delete. @@ -409,13 +419,13 @@ func newDeleteRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'max_timestamp_millis' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -459,12 +469,6 @@ func newDeleteRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRuns()) - }) -} - // start delete-tag command // Slice with functions to override default command behavior. 
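
A recurring change in these hunks is that the per-command `func init()` blocks, which used to append each subcommand to `cmdOverrides`, are removed in favor of explicit `cmd.AddCommand(new...())` calls inside `New()`. The `cmdOverrides` and per-command `*Overrides` slices remain as the hook points for the hand-written `overrides.go` files (such as the dashboards one earlier in this diff). A sketch of such a hook follows; the `updateOverrides` signature is taken from the generated dashboards file above, while the package-level `cmdOverrides` var, the registration style, and both bodies are illustrative assumptions.

```go
package dashboards // hypothetical addition to cmd/workspace/dashboards/overrides.go

import (
	"github.com/databricks/databricks-sdk-go/service/sql"
	"github.com/spf13/cobra"
)

func init() {
	// Runs against the top-level `dashboards` command after the generated
	// subcommands have been attached via cmd.AddCommand(...). cmdOverrides
	// is assumed to follow the same pattern as the other generated files.
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Example = "  databricks dashboards list" // illustrative only
	})

	// Runs against the generated `update` command and its request struct;
	// the slice and its signature come from the generated file above.
	updateOverrides = append(updateOverrides, func(cmd *cobra.Command, req *sql.DashboardEditContent) {
		cmd.Example = "  databricks dashboards update <dashboard-id> --name 'Renamed dashboard'"
	})
}
```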
@@ -498,13 +502,13 @@ func newDeleteTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id', 'key' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -545,12 +549,6 @@ func newDeleteTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteTag()) - }) -} - // start get-by-name command // Slice with functions to override default command behavior. @@ -587,7 +585,7 @@ func newGetByName() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -617,12 +615,6 @@ func newGetByName() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetByName()) - }) -} - // start get-experiment command // Slice with functions to override default command behavior. @@ -651,7 +643,7 @@ func newGetExperiment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -681,12 +673,6 @@ func newGetExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetExperiment()) - }) -} - // start get-history command // Slice with functions to override default command behavior. @@ -720,7 +706,7 @@ func newGetHistory() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -731,11 +717,8 @@ func newGetHistory() *cobra.Command { getHistoryReq.MetricKey = args[0] - response, err := w.Experiments.GetHistoryAll(ctx, getHistoryReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.GetHistory(ctx, getHistoryReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -750,12 +733,6 @@ func newGetHistory() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetHistory()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -784,7 +761,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -814,12 +791,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. 
@@ -849,7 +820,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -879,12 +850,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-run command // Slice with functions to override default command behavior. @@ -920,7 +885,7 @@ func newGetRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -950,12 +915,6 @@ func newGetRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRun()) - }) -} - // start list-artifacts command // Slice with functions to override default command behavior. @@ -987,7 +946,7 @@ func newListArtifacts() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -996,11 +955,8 @@ func newListArtifacts() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Experiments.ListArtifactsAll(ctx, listArtifactsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.ListArtifacts(ctx, listArtifactsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1015,12 +971,6 @@ func newListArtifacts() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListArtifacts()) - }) -} - // start list-experiments command // Slice with functions to override default command behavior. @@ -1050,7 +1000,7 @@ func newListExperiments() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1059,11 +1009,8 @@ func newListExperiments() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Experiments.ListExperimentsAll(ctx, listExperimentsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.ListExperiments(ctx, listExperimentsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1078,12 +1025,6 @@ func newListExperiments() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListExperiments()) - }) -} - // start log-batch command // Slice with functions to override default command behavior. @@ -1152,7 +1093,7 @@ func newLogBatch() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1187,12 +1128,6 @@ func newLogBatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogBatch()) - }) -} - // start log-inputs command // Slice with functions to override default command behavior. 
@@ -1224,7 +1159,7 @@ func newLogInputs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1259,12 +1194,6 @@ func newLogInputs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogInputs()) - }) -} - // start log-metric command // Slice with functions to override default command behavior. @@ -1304,13 +1233,13 @@ func newLogMetric() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value', 'timestamp' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1360,12 +1289,6 @@ func newLogMetric() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogMetric()) - }) -} - // start log-model command // Slice with functions to override default command behavior. @@ -1397,7 +1320,7 @@ func newLogModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1432,12 +1355,6 @@ func newLogModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogModel()) - }) -} - // start log-param command // Slice with functions to override default command behavior. @@ -1476,13 +1393,13 @@ func newLogParam() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1523,12 +1440,6 @@ func newLogParam() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogParam()) - }) -} - // start restore-experiment command // Slice with functions to override default command behavior. @@ -1565,13 +1476,13 @@ func newRestoreExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1609,12 +1520,6 @@ func newRestoreExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreExperiment()) - }) -} - // start restore-run command // Slice with functions to override default command behavior. 
@@ -1646,13 +1551,13 @@ func newRestoreRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1690,12 +1595,6 @@ func newRestoreRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreRun()) - }) -} - // start restore-runs command // Slice with functions to override default command behavior. @@ -1721,7 +1620,9 @@ func newRestoreRuns() *cobra.Command { cmd.Long = `Restore runs by deletion time. Bulk restore runs in an experiment that were deleted no earlier than the - specified timestamp. Restores at most max_runs per request. + specified timestamp. Restores at most max_runs per request. To call this API + from a Databricks Notebook in Python, you can use the client code snippet on + https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. Arguments: EXPERIMENT_ID: The ID of the experiment containing the runs to restore. @@ -1733,13 +1634,13 @@ func newRestoreRuns() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'min_timestamp_millis' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1783,12 +1684,6 @@ func newRestoreRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestoreRuns()) - }) -} - // start search-experiments command // Slice with functions to override default command behavior. @@ -1822,7 +1717,7 @@ func newSearchExperiments() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1838,11 +1733,8 @@ func newSearchExperiments() *cobra.Command { } } - response, err := w.Experiments.SearchExperimentsAll(ctx, searchExperimentsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.SearchExperiments(ctx, searchExperimentsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1857,12 +1749,6 @@ func newSearchExperiments() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchExperiments()) - }) -} - // start search-runs command // Slice with functions to override default command behavior. 
@@ -1899,7 +1785,7 @@ func newSearchRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1915,11 +1801,8 @@ func newSearchRuns() *cobra.Command { } } - response, err := w.Experiments.SearchRunsAll(ctx, searchRunsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Experiments.SearchRuns(ctx, searchRunsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1934,12 +1817,6 @@ func newSearchRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchRuns()) - }) -} - // start set-experiment-tag command // Slice with functions to override default command behavior. @@ -1976,13 +1853,13 @@ func newSetExperimentTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'experiment_id', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -2026,12 +1903,6 @@ func newSetExperimentTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetExperimentTag()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -2065,7 +1936,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2101,12 +1972,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start set-tag command // Slice with functions to override default command behavior. @@ -2146,13 +2011,13 @@ func newSetTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2193,12 +2058,6 @@ func newSetTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetTag()) - }) -} - // start update-experiment command // Slice with functions to override default command behavior. @@ -2232,13 +2091,13 @@ func newUpdateExperiment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'experiment_id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2276,12 +2135,6 @@ func newUpdateExperiment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateExperiment()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -2315,7 +2168,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2351,12 +2204,6 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // start update-run command // Slice with functions to override default command behavior. @@ -2389,7 +2236,7 @@ func newUpdateRun() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -2424,10 +2271,4 @@ func newUpdateRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateRun()) - }) -} - // end service Experiments diff --git a/cmd/workspace/external-locations/external-locations.go b/cmd/workspace/external-locations/external-locations.go index b4166086d..8f0dd346a 100755 --- a/cmd/workspace/external-locations/external-locations.go +++ b/cmd/workspace/external-locations/external-locations.go @@ -39,6 +39,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -88,13 +95,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'url', 'credential_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -138,12 +145,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -175,7 +176,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -205,12 +206,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
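
Another change applied uniformly across these files is swapping `cobra.ExactArgs` for `root.ExactArgs` in the `cmd.Args` checks. The hunks show the two are drop-in compatible (same call shape, same use inside `cmd.Args`); whatever `root.ExactArgs` adds beyond the stock cobra helper is not visible here. For orientation only, a minimal cobra-compatible stand-in with the same shape looks like this; it makes no claim about what the real helper in `cmd/root` does internally.

```go
package root // illustrative stand-in, not the actual cmd/root implementation

import (
	"fmt"

	"github.com/spf13/cobra"
)

// ExactArgs mirrors the call shape seen in the hunks above
// (cmd.Args = root.ExactArgs(n)); the real helper may add CLI-specific
// behavior such as friendlier error messages.
func ExactArgs(n int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) != n {
			return fmt.Errorf("accepts %d arg(s), received %d", n, len(args))
		}
		return nil
	}
}
```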
@@ -227,6 +222,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include external locations in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get an external location.` cmd.Long = `Get an external location. @@ -241,7 +238,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -271,12 +268,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -293,6 +284,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include external locations in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of external locations to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -302,15 +294,13 @@ func newList() *cobra.Command { Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. The caller must be a metastore admin, the owner of the external - location, or a user that has some privilege on the external location. For - unpaginated request, there is no guarantee of a specific ordering of the - elements in the array. For paginated request, elements are ordered by their - name.` + location, or a user that has some privilege on the external location. There is + no guarantee of a specific ordering of the elements in the array.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -319,11 +309,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ExternalLocations.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ExternalLocations.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -338,12 +325,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -367,6 +348,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`) // TODO: complex arg: encryption_details cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. 
Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the external location.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Indicates whether the external location is read-only.`) @@ -387,7 +369,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -423,10 +405,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ExternalLocations diff --git a/cmd/workspace/external-locations/overrides.go b/cmd/workspace/external-locations/overrides.go index 63a30cfc3..00b4921d4 100644 --- a/cmd/workspace/external-locations/overrides.go +++ b/cmd/workspace/external-locations/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListExternalLocationsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Name"}} {{header "Credential"}} {{header "URL"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Name"}} {{header "Credential"}} {{header "URL"}} {{range .}}{{.Name|green}} {{.CredentialName|cyan}} {{.Url}} {{end}}`) } diff --git a/cmd/workspace/functions/functions.go b/cmd/workspace/functions/functions.go index 35356be0f..c8de48797 100755 --- a/cmd/workspace/functions/functions.go +++ b/cmd/workspace/functions/functions.go @@ -32,6 +32,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -62,6 +69,8 @@ func newCreate() *cobra.Command { cmd.Short = `Create a function.` cmd.Long = `Create a function. + **WARNING: This API is experimental and will change in future versions** + Creates a new function The user must have the following permissions in order for the function to be @@ -103,12 +112,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -187,12 +190,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -209,6 +206,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include functions in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get NAME" cmd.Short = `Get a function.` cmd.Long = `Get a function. @@ -270,12 +269,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
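
The listing endpoints in this diff (external locations, functions, git credentials, global init scripts, groups, instance pools, and the experiment listings) switch from `ListAll(...)` plus `cmdio.Render` to returning a paging iterator that is handed to `cmdio.RenderIterator`, so results stream page by page instead of being materialized up front. Below is a sketch of consuming such an iterator directly, using the `ExternalLocations.List` call and the `IncludeBrowse` request field surfaced by the new `--include-browse` flag above; the `HasNext`/`Next` methods are assumed from the SDK's listing iterator interface and are not part of this diff.

```go
package example // illustrative only

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

// printExternalLocations walks the paging iterator returned by List instead
// of collecting everything into a slice, mirroring what RenderIterator does
// in the generated command above. HasNext/Next are assumed iterator methods.
func printExternalLocations(ctx context.Context, w *databricks.WorkspaceClient) error {
	it := w.ExternalLocations.List(ctx, catalog.ListExternalLocationsRequest{
		IncludeBrowse: true, // field added in this diff via --include-browse
	})
	for it.HasNext(ctx) {
		loc, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(loc.Name, loc.Url)
	}
	return nil
}
```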
@@ -292,6 +285,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include functions in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of functions to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -304,9 +298,8 @@ func newList() *cobra.Command { the user must have the **USE_CATALOG** privilege on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user - is the owner. For unpaginated request, there is no guarantee of a specific - ordering of the elements in the array. For paginated request, elements are - ordered by their name. + is the owner. There is no guarantee of a specific ordering of the elements in + the array. Arguments: CATALOG_NAME: Name of parent catalog for functions of interest. @@ -315,7 +308,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -327,11 +320,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Functions.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Functions.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -346,12 +336,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -440,10 +424,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Functions diff --git a/cmd/workspace/git-credentials/git-credentials.go b/cmd/workspace/git-credentials/git-credentials.go index ca256564c..2e8cc2cd4 100755 --- a/cmd/workspace/git-credentials/git-credentials.go +++ b/cmd/workspace/git-credentials/git-credentials.go @@ -32,6 +32,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -58,7 +65,7 @@ func newCreate() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `Git username.`) + cmd.Flags().StringVar(&createReq.GitUsername, "git-username", createReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) cmd.Flags().StringVar(&createReq.PersonalAccessToken, "personal-access-token", createReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) cmd.Use = "create GIT_PROVIDER" @@ -79,13 +86,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'git_provider' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -123,12 +130,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -202,12 +203,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +276,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -311,11 +300,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.GitCredentials.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.GitCredentials.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -330,12 +316,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -355,7 +335,7 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.GitProvider, "git-provider", updateReq.GitProvider, `Git provider.`) - cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `Git username.`) + cmd.Flags().StringVar(&updateReq.GitUsername, "git-username", updateReq.GitUsername, `The username or email provided with your Git provider account, depending on which provider you are using.`) cmd.Flags().StringVar(&updateReq.PersonalAccessToken, "personal-access-token", updateReq.PersonalAccessToken, `The personal access token used to authenticate to the corresponding Git provider.`) cmd.Use = "update CREDENTIAL_ID" @@ -421,10 +401,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service GitCredentials diff --git a/cmd/workspace/global-init-scripts/global-init-scripts.go b/cmd/workspace/global-init-scripts/global-init-scripts.go index 1479381da..92dcb2592 100755 --- a/cmd/workspace/global-init-scripts/global-init-scripts.go +++ b/cmd/workspace/global-init-scripts/global-init-scripts.go @@ -35,6 +35,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -78,13 +85,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'script' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -125,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -201,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -277,12 +272,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -301,7 +290,7 @@ func newList() *cobra.Command { Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. 
To retrieve the contents of a script, use the [get a global init - script](#operation/get-script) operation.` + script](:method:globalinitscripts/get) operation.` cmd.Annotations = make(map[string]string) @@ -309,11 +298,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.GlobalInitScripts.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.GlobalInitScripts.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -328,12 +314,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -371,13 +351,13 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only SCRIPT_ID as positional arguments. Provide 'name', 'script' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -419,10 +399,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service GlobalInitScripts diff --git a/cmd/workspace/grants/grants.go b/cmd/workspace/grants/grants.go index 020e0bf8b..876f0343e 100755 --- a/cmd/workspace/grants/grants.go +++ b/cmd/workspace/grants/grants.go @@ -37,6 +37,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetEffective()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -76,7 +81,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -110,12 +115,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-effective command // Slice with functions to override default command behavior. @@ -147,7 +146,7 @@ func newGetEffective() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -181,12 +180,6 @@ func newGetEffective() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEffective()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -220,7 +213,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -260,10 +253,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Grants diff --git a/cmd/workspace/groups.go b/cmd/workspace/groups.go index 8f3768137..d8a4dec4f 100644 --- a/cmd/workspace/groups.go +++ b/cmd/workspace/groups.go @@ -64,5 +64,9 @@ func Groups() []cobra.Group { ID: "dashboards", Title: "Dashboards", }, + { + ID: "marketplace", + Title: "Marketplace", + }, } } diff --git a/cmd/workspace/groups/groups.go b/cmd/workspace/groups/groups.go index 588bce316..14650d984 100755 --- a/cmd/workspace/groups/groups.go +++ b/cmd/workspace/groups/groups.go @@ -33,6 +33,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -79,7 +87,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -114,12 +122,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -190,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -266,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -305,7 +295,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -314,11 +304,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Groups.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Groups.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -333,12 +320,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -420,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. 
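
The `cmd/workspace/groups.go` change above registers a new "marketplace" command group alongside the existing ones; generated service commands opt into a group through their `GroupID` field (for example `GroupID: "sql"` on the dashboards and data-sources commands earlier in this diff). A sketch of how a root command would consume these groups; the wiring function and the marketplace command name here are purely illustrative, only `cobra.Group`, `AddGroup`, and `GroupID` are standard cobra API.

```go
package example // illustrative only; the actual wiring lives in cmd/root

import (
	"github.com/spf13/cobra"
)

// attachGroups registers each group on the root command and shows a command
// joining the new group; groups mirrors the []cobra.Group returned by the
// Groups() function above, and IDs must match the GroupID set on commands.
func attachGroups(root *cobra.Command, groups []cobra.Group) {
	for i := range groups {
		root.AddGroup(&groups[i]) // index to avoid sharing one loop-variable address
	}
	// A hypothetical marketplace service command joining the new group:
	root.AddCommand(&cobra.Command{
		Use:     "consumer-listings",
		GroupID: "marketplace",
	})
}
```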
@@ -514,10 +489,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Groups diff --git a/cmd/workspace/instance-pools/instance-pools.go b/cmd/workspace/instance-pools/instance-pools.go index 968f64bc6..db96f1466 100755 --- a/cmd/workspace/instance-pools/instance-pools.go +++ b/cmd/workspace/instance-pools/instance-pools.go @@ -44,6 +44,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -101,13 +112,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_name', 'node_type_id' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -148,12 +159,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -186,7 +191,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_id' in your JSON input") } @@ -245,12 +250,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -294,13 +293,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_pool_id', 'instance_pool_name', 'node_type_id' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -344,12 +343,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -420,12 +413,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
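Another recurring change in these hunks swaps cobra.ExactArgs for root.ExactArgs in the Args validators. The diff does not show root.ExactArgs itself; purely as an illustration, a validator with the same cobra.PositionalArgs shape could be written as below (the CLI's actual implementation may differ, for example in its error wording):

package root

import (
	"fmt"

	"github.com/spf13/cobra"
)

// ExactArgs is an illustrative sketch of a cobra.PositionalArgs validator
// requiring exactly n positional arguments; it is not the CLI's real code.
func ExactArgs(n int) cobra.PositionalArgs {
	return func(cmd *cobra.Command, args []string) error {
		if len(args) != n {
			return fmt.Errorf("%s accepts %d positional argument(s), received %d", cmd.CommandPath(), n, len(args))
		}
		return nil
	}
}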
@@ -496,12 +483,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -573,12 +554,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -602,11 +577,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.InstancePools.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.InstancePools.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -621,12 +593,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -708,12 +674,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -795,10 +755,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service InstancePools diff --git a/cmd/workspace/instance-profiles/instance-profiles.go b/cmd/workspace/instance-profiles/instance-profiles.go index ca78a15f2..7134c16c6 100755 --- a/cmd/workspace/instance-profiles/instance-profiles.go +++ b/cmd/workspace/instance-profiles/instance-profiles.go @@ -32,6 +32,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAdd()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRemove()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -77,13 +83,13 @@ func newAdd() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -121,12 +127,6 @@ func newAdd() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAdd()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -176,13 +176,13 @@ func newEdit() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -220,12 +220,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -251,11 +245,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.InstanceProfiles.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.InstanceProfiles.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -270,12 +261,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start remove command // Slice with functions to override default command behavior. @@ -310,13 +295,13 @@ func newRemove() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'instance_profile_arn' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -354,10 +339,4 @@ func newRemove() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRemove()) - }) -} - // end service InstanceProfiles diff --git a/cmd/workspace/ip-access-lists/ip-access-lists.go b/cmd/workspace/ip-access-lists/ip-access-lists.go index 5bba8b51d..ec5958b5b 100755 --- a/cmd/workspace/ip-access-lists/ip-access-lists.go +++ b/cmd/workspace/ip-access-lists/ip-access-lists.go @@ -47,6 +47,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newReplace()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -109,13 +117,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'label', 'list_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -159,12 +167,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -235,12 +237,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -311,12 +307,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -340,11 +330,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.IpAccessLists.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.IpAccessLists.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -359,12 +346,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start replace command // Slice with functions to override default command behavior. @@ -417,13 +398,13 @@ func newReplace() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only IP_ACCESS_LIST_ID as positional arguments. Provide 'label', 'list_type', 'enabled' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -474,12 +455,6 @@ func newReplace() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReplace()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -578,10 +553,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service IpAccessLists diff --git a/cmd/workspace/jobs/jobs.go b/cmd/workspace/jobs/jobs.go index 634a7f399..50a045921 100755 --- a/cmd/workspace/jobs/jobs.go +++ b/cmd/workspace/jobs/jobs.go @@ -45,6 +45,28 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCancelAllRuns()) + cmd.AddCommand(newCancelRun()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteRun()) + cmd.AddCommand(newExportRun()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetRun()) + cmd.AddCommand(newGetRunOutput()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListRuns()) + cmd.AddCommand(newRepairRun()) + cmd.AddCommand(newReset()) + cmd.AddCommand(newRunNow()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSubmit()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -84,7 +106,7 @@ func newCancelAllRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -119,12 +141,6 @@ func newCancelAllRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCancelAllRuns()) - }) -} - // start cancel-run command // Slice with functions to override default command behavior. 
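The list commands above (global init scripts, groups, instance pools, instance profiles, IP access lists) all move from the eager ListAll helpers, which buffered every page into a slice before rendering, to the paginated List iterators rendered with cmdio.RenderIterator. A self-contained sketch of the consumption pattern such an iterator implies follows; the Iterator interface here is illustrative, not the SDK's exact type:

package main

import (
	"context"
	"fmt"
)

// Iterator is an illustrative stand-in for the SDK's paginated iterators.
type Iterator[T any] interface {
	HasNext(ctx context.Context) bool
	Next(ctx context.Context) (T, error)
}

// sliceIterator adapts a plain slice so the streaming loop can be exercised.
type sliceIterator[T any] struct {
	items []T
	pos   int
}

func (s *sliceIterator[T]) HasNext(ctx context.Context) bool { return s.pos < len(s.items) }

func (s *sliceIterator[T]) Next(ctx context.Context) (T, error) {
	item := s.items[s.pos]
	s.pos++
	return item, nil
}

// renderAll drains the iterator item by item, the way an iterator-based
// renderer can stream results without materializing a full ListAll slice.
func renderAll[T any](ctx context.Context, it Iterator[T]) error {
	for it.HasNext(ctx) {
		item, err := it.Next(ctx)
		if err != nil {
			return err
		}
		fmt.Println(item)
	}
	return nil
}

func main() {
	it := &sliceIterator[string]{items: []string{"list-a", "list-b"}}
	if err := renderAll(context.Background(), it); err != nil {
		fmt.Println("error:", err)
	}
}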
@@ -162,7 +178,7 @@ func newCancelRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -243,12 +259,6 @@ func newCancelRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCancelRun()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -308,12 +318,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -345,7 +349,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") } @@ -407,12 +411,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start delete-run command // Slice with functions to override default command behavior. @@ -438,13 +436,13 @@ func newDeleteRun() *cobra.Command { Deletes a non-active run. Returns an error if the run is active. Arguments: - RUN_ID: The canonical identifier of the run for which to retrieve the metadata.` + RUN_ID: ID of the run to delete.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -472,14 +470,14 @@ func newDeleteRun() *cobra.Command { if err != nil { return fmt.Errorf("failed to load names for Jobs drop-down. Please manually specify required arguments. Original error: %w", err) } - id, err := cmdio.Select(ctx, names, "The canonical identifier of the run for which to retrieve the metadata") + id, err := cmdio.Select(ctx, names, "ID of the run to delete") if err != nil { return err } args = append(args, id) } if len(args) != 1 { - return fmt.Errorf("expected to have the canonical identifier of the run for which to retrieve the metadata") + return fmt.Errorf("expected to have id of the run to delete") } _, err = fmt.Sscan(args[0], &deleteRunReq.RunId) if err != nil { @@ -506,12 +504,6 @@ func newDeleteRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteRun()) - }) -} - // start export-run command // Slice with functions to override default command behavior. @@ -587,12 +579,6 @@ func newExportRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExportRun()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -667,12 +653,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -743,12 +723,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -820,12 +794,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-run command // Slice with functions to override default command behavior. @@ -908,12 +876,6 @@ func newGetRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRun()) - }) -} - // start get-run-output command // Slice with functions to override default command behavior. @@ -946,7 +908,7 @@ func newGetRunOutput() *cobra.Command { 60 days, you must save old run results before they expire. Arguments: - RUN_ID: The canonical identifier for the run. This field is required.` + RUN_ID: The canonical identifier for the run.` cmd.Annotations = make(map[string]string) @@ -996,12 +958,6 @@ func newGetRunOutput() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetRunOutput()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -1033,7 +989,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1042,11 +998,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Jobs.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Jobs.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1061,12 +1014,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-runs command // Slice with functions to override default command behavior. @@ -1091,8 +1038,8 @@ func newListRuns() *cobra.Command { cmd.Flags().IntVar(&listRunsReq.Offset, "offset", listRunsReq.Offset, `The offset of the first run to return, relative to the most recent run.`) cmd.Flags().StringVar(&listRunsReq.PageToken, "page-token", listRunsReq.PageToken, `Use next_page_token or prev_page_token returned from the previous request to list the next or previous page of runs respectively.`) cmd.Flags().Var(&listRunsReq.RunType, "run-type", `The type of runs to return. 
Supported values: [JOB_RUN, SUBMIT_RUN, WORKFLOW_RUN]`) - cmd.Flags().IntVar(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`) - cmd.Flags().IntVar(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`) + cmd.Flags().Int64Var(&listRunsReq.StartTimeFrom, "start-time-from", listRunsReq.StartTimeFrom, `Show runs that started _at or after_ this value.`) + cmd.Flags().Int64Var(&listRunsReq.StartTimeTo, "start-time-to", listRunsReq.StartTimeTo, `Show runs that started _at or before_ this value.`) cmd.Use = "list-runs" cmd.Short = `List job runs.` @@ -1103,7 +1050,7 @@ func newListRuns() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1112,11 +1059,8 @@ func newListRuns() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Jobs.ListRunsAll(ctx, listRunsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Jobs.ListRuns(ctx, listRunsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1131,12 +1075,6 @@ func newListRuns() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListRuns()) - }) -} - // start repair-run command // Slice with functions to override default command behavior. @@ -1189,7 +1127,7 @@ func newRepairRun() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'run_id' in your JSON input") } @@ -1270,12 +1208,6 @@ func newRepairRun() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRepairRun()) - }) -} - // start reset command // Slice with functions to override default command behavior. @@ -1336,12 +1268,6 @@ func newReset() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReset()) - }) -} - // start run-now command // Slice with functions to override default command behavior. @@ -1390,7 +1316,7 @@ func newRunNow() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") } @@ -1471,12 +1397,6 @@ func newRunNow() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRunNow()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -1558,12 +1478,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start submit command // Slice with functions to override default command behavior. 
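The list-runs hunk above also corrects the flag types for --start-time-from and --start-time-to from IntVar to Int64Var, matching the int64 request fields that carry epoch timestamps in milliseconds. A small standalone sketch of binding such a flag with pflag (flag set name and sample value are made up):

package main

import (
	"fmt"

	flag "github.com/spf13/pflag"
)

func main() {
	// Millisecond epoch timestamps need an int64-backed flag; this mirrors
	// the switch to Int64Var for --start-time-from / --start-time-to.
	var startTimeFrom int64
	fs := flag.NewFlagSet("list-runs", flag.ContinueOnError)
	fs.Int64Var(&startTimeFrom, "start-time-from", 0, "Show runs that started at or after this value.")

	_ = fs.Parse([]string{"--start-time-from", "1712000000000"})
	fmt.Println(startTimeFrom)
}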
@@ -1589,11 +1503,13 @@ func newSubmit() *cobra.Command { // TODO: array: access_control_list // TODO: complex arg: email_notifications + // TODO: array: environments // TODO: complex arg: git_source // TODO: complex arg: health cmd.Flags().StringVar(&submitReq.IdempotencyToken, "idempotency-token", submitReq.IdempotencyToken, `An optional token that can be used to guarantee the idempotency of job run requests.`) // TODO: complex arg: notification_settings // TODO: complex arg: queue + // TODO: complex arg: run_as cmd.Flags().StringVar(&submitReq.RunName, "run-name", submitReq.RunName, `An optional name for the run.`) // TODO: array: tasks cmd.Flags().IntVar(&submitReq.TimeoutSeconds, "timeout-seconds", submitReq.TimeoutSeconds, `An optional timeout applied to each run of this job.`) @@ -1611,7 +1527,7 @@ func newSubmit() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1665,12 +1581,6 @@ func newSubmit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSubmit()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -1706,7 +1616,7 @@ func newUpdate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input") } @@ -1768,12 +1678,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. 
@@ -1855,10 +1759,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Jobs diff --git a/cmd/workspace/jobs/overrides.go b/cmd/workspace/jobs/overrides.go index fd22dcbdb..ee7d20551 100644 --- a/cmd/workspace/jobs/overrides.go +++ b/cmd/workspace/jobs/overrides.go @@ -13,8 +13,9 @@ func listOverride(listCmd *cobra.Command, listReq *jobs.ListJobsRequest) { } func listRunsOverride(listRunsCmd *cobra.Command, listRunsReq *jobs.ListRunsRequest) { + listRunsCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL`) listRunsCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Job ID"}} {{header "Run ID"}} {{header "Result State"}} URL {{range .}}{{green "%d" .JobId}} {{cyan "%d" .RunId}} {{if eq .State.ResultState "SUCCESS"}}{{"SUCCESS"|green}}{{else}}{{red "%s" .State.ResultState}}{{end}} {{.RunPageUrl}} {{end}}`) } diff --git a/cmd/workspace/lakeview/lakeview.go b/cmd/workspace/lakeview/lakeview.go index 67f1811d3..36eab0e7f 100755 --- a/cmd/workspace/lakeview/lakeview.go +++ b/cmd/workspace/lakeview/lakeview.go @@ -3,7 +3,10 @@ package lakeview import ( + "fmt" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" "github.com/databricks/cli/libs/flags" "github.com/databricks/databricks-sdk-go/service/dashboards" "github.com/spf13/cobra" @@ -24,11 +27,28 @@ func New() *cobra.Command { Annotations: map[string]string{ "package": "dashboards", }, - - // This service is being previewed; hide from help output. - Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCreateSchedule()) + cmd.AddCommand(newCreateSubscription()) + cmd.AddCommand(newDeleteSchedule()) + cmd.AddCommand(newDeleteSubscription()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPublished()) + cmd.AddCommand(newGetSchedule()) + cmd.AddCommand(newGetSubscription()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListSchedules()) + cmd.AddCommand(newListSubscriptions()) + cmd.AddCommand(newMigrate()) + cmd.AddCommand(newPublish()) + cmd.AddCommand(newTrash()) + cmd.AddCommand(newUnpublish()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateSchedule()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -37,6 +57,847 @@ func New() *cobra.Command { return cmd } +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
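The jobs/overrides.go hunk above shows how manually curated files hook into the generated constructors: each command exposes an overrides slice (such as the createOverrides declared next for Lakeview), and an init() function in an override file appends a function that tweaks the built command, for example by setting the headerTemplate and template annotations. A schematic version of that mechanism, with a placeholder customization rather than the CLI's real templates, is:

package lakeview

import (
	"github.com/databricks/databricks-sdk-go/service/dashboards"
	"github.com/spf13/cobra"
)

// createOverrides mirrors the generated slice declared below; manually curated
// files in this directory append to it from init().
var createOverrides []func(*cobra.Command, *dashboards.CreateDashboardRequest)

func init() {
	createOverrides = append(createOverrides, func(cmd *cobra.Command, req *dashboards.CreateDashboardRequest) {
		// Placeholder override: adjust help text or output templates here,
		// e.g. cmd.Annotations["template"] as jobs/overrides.go does.
		cmd.Short = `Create dashboard (with workspace defaults).`
	})
}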
+var createOverrides []func( + *cobra.Command, + *dashboards.CreateDashboardRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq dashboards.CreateDashboardRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.ParentPath, "parent-path", createReq.ParentPath, `The workspace path of the folder containing the dashboard.`) + cmd.Flags().StringVar(&createReq.SerializedDashboard, "serialized-dashboard", createReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + + cmd.Use = "create DISPLAY_NAME" + cmd.Short = `Create dashboard.` + cmd.Long = `Create dashboard. + + Create a draft dashboard. + + Arguments: + DISPLAY_NAME: The display name of the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'display_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + createReq.DisplayName = args[0] + } + + response, err := w.Lakeview.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start create-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createScheduleOverrides []func( + *cobra.Command, + *dashboards.CreateScheduleRequest, +) + +func newCreateSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var createScheduleReq dashboards.CreateScheduleRequest + var createScheduleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createScheduleReq.DisplayName, "display-name", createScheduleReq.DisplayName, `The display name for schedule.`) + cmd.Flags().Var(&createScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + + cmd.Use = "create-schedule DASHBOARD_ID" + cmd.Short = `Create dashboard schedule.` + cmd.Long = `Create dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. 
+ cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createScheduleJson.Unmarshal(&createScheduleReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createScheduleReq.DashboardId = args[0] + + response, err := w.Lakeview.CreateSchedule(ctx, createScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createScheduleOverrides { + fn(cmd, &createScheduleReq) + } + + return cmd +} + +// start create-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createSubscriptionOverrides []func( + *cobra.Command, + *dashboards.CreateSubscriptionRequest, +) + +func newCreateSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var createSubscriptionReq dashboards.CreateSubscriptionRequest + var createSubscriptionJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createSubscriptionJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create-subscription DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Create schedule subscription.` + cmd.Long = `Create schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createSubscriptionJson.Unmarshal(&createSubscriptionReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + createSubscriptionReq.DashboardId = args[0] + createSubscriptionReq.ScheduleId = args[1] + + response, err := w.Lakeview.CreateSubscription(ctx, createSubscriptionReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createSubscriptionOverrides { + fn(cmd, &createSubscriptionReq) + } + + return cmd +} + +// start delete-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
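The create-schedule and create-subscription commands above accept their request bodies only through the --json flag (the nested schedule and subscriber fields have no dedicated flags), so the handler unmarshals the flag into the request struct and otherwise returns the "please provide command input in JSON format" error. A stripped-down sketch of just that decode step, using a stand-in struct whose field names are assumptions rather than the SDK's exact schema:

package main

import (
	"encoding/json"
	"fmt"
)

// createScheduleInput is a stand-in for dashboards.CreateScheduleRequest; only
// the JSON decoding part of the --json flow is sketched here.
type createScheduleInput struct {
	CronSchedule struct {
		QuartzCronExpression string `json:"quartz_cron_expression"`
		TimezoneId           string `json:"timezone_id"`
	} `json:"cron_schedule"`
	DisplayName string `json:"display_name"`
}

func main() {
	// In the CLI the payload arrives via --json (inline or @path/to/file.json);
	// here it is hard-coded for illustration.
	payload := `{"cron_schedule":{"quartz_cron_expression":"0 0 8 * * ?","timezone_id":"UTC"},"display_name":"daily"}`

	var req createScheduleInput
	if err := json.Unmarshal([]byte(payload), &req); err != nil {
		fmt.Println("please provide command input in JSON format by specifying the --json flag")
		return
	}
	fmt.Printf("schedule %q at %s (%s)\n", req.DisplayName, req.CronSchedule.QuartzCronExpression, req.CronSchedule.TimezoneId)
}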
+var deleteScheduleOverrides []func( + *cobra.Command, + *dashboards.DeleteScheduleRequest, +) + +func newDeleteSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var deleteScheduleReq dashboards.DeleteScheduleRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteScheduleReq.Etag, "etag", deleteScheduleReq.Etag, `The etag for the schedule.`) + + cmd.Use = "delete-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Delete dashboard schedule.` + cmd.Long = `Delete dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteScheduleReq.DashboardId = args[0] + deleteScheduleReq.ScheduleId = args[1] + + err = w.Lakeview.DeleteSchedule(ctx, deleteScheduleReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteScheduleOverrides { + fn(cmd, &deleteScheduleReq) + } + + return cmd +} + +// start delete-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteSubscriptionOverrides []func( + *cobra.Command, + *dashboards.DeleteSubscriptionRequest, +) + +func newDeleteSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var deleteSubscriptionReq dashboards.DeleteSubscriptionRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteSubscriptionReq.Etag, "etag", deleteSubscriptionReq.Etag, `The etag for the subscription.`) + + cmd.Use = "delete-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" + cmd.Short = `Delete schedule subscription.` + cmd.Long = `Delete schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. + SUBSCRIPTION_ID: UUID identifying the subscription.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteSubscriptionReq.DashboardId = args[0] + deleteSubscriptionReq.ScheduleId = args[1] + deleteSubscriptionReq.SubscriptionId = args[2] + + err = w.Lakeview.DeleteSubscription(ctx, deleteSubscriptionReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range deleteSubscriptionOverrides { + fn(cmd, &deleteSubscriptionReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *dashboards.GetDashboardRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq dashboards.GetDashboardRequest + + // TODO: short flags + + cmd.Use = "get DASHBOARD_ID" + cmd.Short = `Get dashboard.` + cmd.Long = `Get dashboard. + + Get a draft dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.DashboardId = args[0] + + response, err := w.Lakeview.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start get-published command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getPublishedOverrides []func( + *cobra.Command, + *dashboards.GetPublishedDashboardRequest, +) + +func newGetPublished() *cobra.Command { + cmd := &cobra.Command{} + + var getPublishedReq dashboards.GetPublishedDashboardRequest + + // TODO: short flags + + cmd.Use = "get-published DASHBOARD_ID" + cmd.Short = `Get published dashboard.` + cmd.Long = `Get published dashboard. + + Get the current published dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to be published.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getPublishedReq.DashboardId = args[0] + + response, err := w.Lakeview.GetPublished(ctx, getPublishedReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getPublishedOverrides { + fn(cmd, &getPublishedReq) + } + + return cmd +} + +// start get-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
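The get and get-published handlers above are thin wrappers over the corresponding WorkspaceClient methods. For comparison, calling the same operation directly from the Go SDK looks roughly like the following, assuming the databricks-sdk-go types named in the generated code and with error handling trimmed to the essentials:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()

	// Credentials are resolved from the environment/config, much as the CLI
	// does via root.WorkspaceClient; exact behavior depends on SDK version.
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		panic(err)
	}

	// Same request type the generated get command fills from its positional arg.
	dashboard, err := w.Lakeview.Get(ctx, dashboards.GetDashboardRequest{
		DashboardId: "00000000-0000-0000-0000-000000000000", // placeholder UUID
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(dashboard.DisplayName)
}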
+var getScheduleOverrides []func( + *cobra.Command, + *dashboards.GetScheduleRequest, +) + +func newGetSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var getScheduleReq dashboards.GetScheduleRequest + + // TODO: short flags + + cmd.Use = "get-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Get dashboard schedule.` + cmd.Long = `Get dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getScheduleReq.DashboardId = args[0] + getScheduleReq.ScheduleId = args[1] + + response, err := w.Lakeview.GetSchedule(ctx, getScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getScheduleOverrides { + fn(cmd, &getScheduleReq) + } + + return cmd +} + +// start get-subscription command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getSubscriptionOverrides []func( + *cobra.Command, + *dashboards.GetSubscriptionRequest, +) + +func newGetSubscription() *cobra.Command { + cmd := &cobra.Command{} + + var getSubscriptionReq dashboards.GetSubscriptionRequest + + // TODO: short flags + + cmd.Use = "get-subscription DASHBOARD_ID SCHEDULE_ID SUBSCRIPTION_ID" + cmd.Short = `Get schedule subscription.` + cmd.Long = `Get schedule subscription. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule which the subscription belongs. + SUBSCRIPTION_ID: UUID identifying the subscription.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getSubscriptionReq.DashboardId = args[0] + getSubscriptionReq.ScheduleId = args[1] + getSubscriptionReq.SubscriptionId = args[2] + + response, err := w.Lakeview.GetSubscription(ctx, getSubscriptionReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getSubscriptionOverrides { + fn(cmd, &getSubscriptionReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var listOverrides []func( + *cobra.Command, + *dashboards.ListDashboardsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq dashboards.ListDashboardsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, `The number of dashboards to return per page.`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A page token, received from a previous ListDashboards call.`) + cmd.Flags().BoolVar(&listReq.ShowTrashed, "show-trashed", listReq.ShowTrashed, `The flag to include dashboards located in the trash.`) + cmd.Flags().Var(&listReq.View, "view", `Indicates whether to include all metadata from the dashboard in the response. Supported values: [DASHBOARD_VIEW_BASIC, DASHBOARD_VIEW_FULL]`) + + cmd.Use = "list" + cmd.Short = `List dashboards.` + cmd.Long = `List dashboards.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.Lakeview.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-schedules command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSchedulesOverrides []func( + *cobra.Command, + *dashboards.ListSchedulesRequest, +) + +func newListSchedules() *cobra.Command { + cmd := &cobra.Command{} + + var listSchedulesReq dashboards.ListSchedulesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSchedulesReq.PageSize, "page-size", listSchedulesReq.PageSize, `The number of schedules to return per page.`) + cmd.Flags().StringVar(&listSchedulesReq.PageToken, "page-token", listSchedulesReq.PageToken, `A page token, received from a previous ListSchedules call.`) + + cmd.Use = "list-schedules DASHBOARD_ID" + cmd.Short = `List dashboard schedules.` + cmd.Long = `List dashboard schedules. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listSchedulesReq.DashboardId = args[0] + + response := w.Lakeview.ListSchedules(ctx, listSchedulesReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range listSchedulesOverrides { + fn(cmd, &listSchedulesReq) + } + + return cmd +} + +// start list-subscriptions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listSubscriptionsOverrides []func( + *cobra.Command, + *dashboards.ListSubscriptionsRequest, +) + +func newListSubscriptions() *cobra.Command { + cmd := &cobra.Command{} + + var listSubscriptionsReq dashboards.ListSubscriptionsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listSubscriptionsReq.PageSize, "page-size", listSubscriptionsReq.PageSize, `The number of subscriptions to return per page.`) + cmd.Flags().StringVar(&listSubscriptionsReq.PageToken, "page-token", listSubscriptionsReq.PageToken, `A page token, received from a previous ListSubscriptions call.`) + + cmd.Use = "list-subscriptions DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `List schedule subscriptions.` + cmd.Long = `List schedule subscriptions. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the subscription belongs. + SCHEDULE_ID: UUID identifying the schedule to which the subscription belongs.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listSubscriptionsReq.DashboardId = args[0] + listSubscriptionsReq.ScheduleId = args[1] + + response := w.Lakeview.ListSubscriptions(ctx, listSubscriptionsReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listSubscriptionsOverrides { + fn(cmd, &listSubscriptionsReq) + } + + return cmd +} + +// start migrate command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var migrateOverrides []func( + *cobra.Command, + *dashboards.MigrateDashboardRequest, +) + +func newMigrate() *cobra.Command { + cmd := &cobra.Command{} + + var migrateReq dashboards.MigrateDashboardRequest + var migrateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&migrateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&migrateReq.DisplayName, "display-name", migrateReq.DisplayName, `Display name for the new Lakeview dashboard.`) + cmd.Flags().StringVar(&migrateReq.ParentPath, "parent-path", migrateReq.ParentPath, `The workspace path of the folder to contain the migrated Lakeview dashboard.`) + + cmd.Use = "migrate SOURCE_DASHBOARD_ID" + cmd.Short = `Migrate dashboard.` + cmd.Long = `Migrate dashboard. + + Migrates a classic SQL dashboard to Lakeview. 
+ + Arguments: + SOURCE_DASHBOARD_ID: UUID of the dashboard to be migrated.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'source_dashboard_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = migrateJson.Unmarshal(&migrateReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + migrateReq.SourceDashboardId = args[0] + } + + response, err := w.Lakeview.Migrate(ctx, migrateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range migrateOverrides { + fn(cmd, &migrateReq) + } + + return cmd +} + // start publish command // Slice with functions to override default command behavior. @@ -70,7 +931,7 @@ func newPublish() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -87,11 +948,11 @@ func newPublish() *cobra.Command { } publishReq.DashboardId = args[0] - err = w.Lakeview.Publish(ctx, publishReq) + response, err := w.Lakeview.Publish(ctx, publishReq) if err != nil { return err } - return nil + return cmdio.Render(ctx, response) } // Disable completions since they are not applicable. @@ -106,10 +967,266 @@ func newPublish() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPublish()) - }) +// start trash command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var trashOverrides []func( + *cobra.Command, + *dashboards.TrashDashboardRequest, +) + +func newTrash() *cobra.Command { + cmd := &cobra.Command{} + + var trashReq dashboards.TrashDashboardRequest + + // TODO: short flags + + cmd.Use = "trash DASHBOARD_ID" + cmd.Short = `Trash dashboard.` + cmd.Long = `Trash dashboard. + + Trash a dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + trashReq.DashboardId = args[0] + + err = w.Lakeview.Trash(ctx, trashReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range trashOverrides { + fn(cmd, &trashReq) + } + + return cmd +} + +// start unpublish command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var unpublishOverrides []func( + *cobra.Command, + *dashboards.UnpublishDashboardRequest, +) + +func newUnpublish() *cobra.Command { + cmd := &cobra.Command{} + + var unpublishReq dashboards.UnpublishDashboardRequest + + // TODO: short flags + + cmd.Use = "unpublish DASHBOARD_ID" + cmd.Short = `Unpublish dashboard.` + cmd.Long = `Unpublish dashboard. + + Unpublish the dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to be published.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + unpublishReq.DashboardId = args[0] + + err = w.Lakeview.Unpublish(ctx, unpublishReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range unpublishOverrides { + fn(cmd, &unpublishReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *dashboards.UpdateDashboardRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq dashboards.UpdateDashboardRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.DisplayName, "display-name", updateReq.DisplayName, `The display name of the dashboard.`) + cmd.Flags().StringVar(&updateReq.Etag, "etag", updateReq.Etag, `The etag for the dashboard.`) + cmd.Flags().StringVar(&updateReq.SerializedDashboard, "serialized-dashboard", updateReq.SerializedDashboard, `The contents of the dashboard in serialized string form.`) + cmd.Flags().StringVar(&updateReq.WarehouseId, "warehouse-id", updateReq.WarehouseId, `The warehouse ID used to run the dashboard.`) + + cmd.Use = "update DASHBOARD_ID" + cmd.Short = `Update dashboard.` + cmd.Long = `Update dashboard. + + Update a draft dashboard. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.DashboardId = args[0] + + response, err := w.Lakeview.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// start update-schedule command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateScheduleOverrides []func( + *cobra.Command, + *dashboards.UpdateScheduleRequest, +) + +func newUpdateSchedule() *cobra.Command { + cmd := &cobra.Command{} + + var updateScheduleReq dashboards.UpdateScheduleRequest + var updateScheduleJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateScheduleJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateScheduleReq.DisplayName, "display-name", updateScheduleReq.DisplayName, `The display name for schedule.`) + cmd.Flags().StringVar(&updateScheduleReq.Etag, "etag", updateScheduleReq.Etag, `The etag for the schedule.`) + cmd.Flags().Var(&updateScheduleReq.PauseStatus, "pause-status", `The status indicates whether this schedule is paused or not. Supported values: [PAUSED, UNPAUSED]`) + + cmd.Use = "update-schedule DASHBOARD_ID SCHEDULE_ID" + cmd.Short = `Update dashboard schedule.` + cmd.Long = `Update dashboard schedule. + + Arguments: + DASHBOARD_ID: UUID identifying the dashboard to which the schedule belongs. + SCHEDULE_ID: UUID identifying the schedule.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateScheduleJson.Unmarshal(&updateScheduleReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateScheduleReq.DashboardId = args[0] + updateScheduleReq.ScheduleId = args[1] + + response, err := w.Lakeview.UpdateSchedule(ctx, updateScheduleReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateScheduleOverrides { + fn(cmd, &updateScheduleReq) + } + + return cmd } // end service Lakeview diff --git a/cmd/workspace/libraries/libraries.go b/cmd/workspace/libraries/libraries.go index 1e742892d..2c10d8161 100755 --- a/cmd/workspace/libraries/libraries.go +++ b/cmd/workspace/libraries/libraries.go @@ -25,18 +25,14 @@ func New() *cobra.Command { To make third-party or custom code available to notebooks and jobs running on your clusters, you can install a library. Libraries can be written in Python, - Java, Scala, and R. You can upload Java, Scala, and Python libraries and point - to external packages in PyPI, Maven, and CRAN repositories. + Java, Scala, and R. You can upload Python, Java, Scala and R libraries and + point to external packages in PyPI, Maven, and CRAN repositories. Cluster libraries can be used by all notebooks running on a cluster. 
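
The Lakeview subcommands above are thin wrappers over the workspace client from databricks-sdk-go; the equivalent direct calls look roughly like the sketch below. The method and request-type names are taken from the hunks above, the placeholder UUID and the client construction via databricks.Must are assumptions about a matching SDK version.

    package main

    import (
    	"context"

    	"github.com/databricks/databricks-sdk-go"
    	"github.com/databricks/databricks-sdk-go/service/dashboards"
    )

    func main() {
    	ctx := context.Background()
    	// Reads auth from the environment or a config profile, much as the CLI does.
    	w := databricks.Must(databricks.NewWorkspaceClient())

    	// Trash a draft dashboard (placeholder UUID).
    	err := w.Lakeview.Trash(ctx, dashboards.TrashDashboardRequest{
    		DashboardId: "00000000-0000-0000-0000-000000000000",
    	})
    	if err != nil {
    		panic(err)
    	}

    	// Unpublish the same dashboard.
    	err = w.Lakeview.Unpublish(ctx, dashboards.UnpublishDashboardRequest{
    		DashboardId: "00000000-0000-0000-0000-000000000000",
    	})
    	if err != nil {
    		panic(err)
    	}
    }
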
You can install a cluster library directly from a public repository such as PyPI or Maven, using a previously installed workspace library, or using an init script. - When you install a library on a cluster, a notebook already attached to that - cluster will not immediately see the new library. You must first detach and - then reattach the notebook to the cluster. - When you uninstall a library from a cluster, the library is removed only when you restart the cluster. Until you restart the cluster, the status of the uninstalled library appears as Uninstall pending restart.`, @@ -46,6 +42,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAllClusterStatuses()) + cmd.AddCommand(newClusterStatus()) + cmd.AddCommand(newInstall()) + cmd.AddCommand(newUninstall()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -69,9 +71,8 @@ func newAllClusterStatuses() *cobra.Command { cmd.Short = `Get all statuses.` cmd.Long = `Get all statuses. - Get the status of all libraries on all clusters. A status will be available - for all libraries installed on this cluster via the API or the libraries UI as - well as libraries set to be installed on all clusters via the libraries UI.` + Get the status of all libraries on all clusters. A status is returned for all + libraries installed on this cluster via the API or the libraries UI.` cmd.Annotations = make(map[string]string) @@ -79,11 +80,8 @@ func newAllClusterStatuses() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Libraries.AllClusterStatuses(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Libraries.AllClusterStatuses(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -98,25 +96,19 @@ func newAllClusterStatuses() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAllClusterStatuses()) - }) -} - // start cluster-status command // Slice with functions to override default command behavior. // Functions can be added from the `init()` function in manually curated files in this directory. var clusterStatusOverrides []func( *cobra.Command, - *compute.ClusterStatusRequest, + *compute.ClusterStatus, ) func newClusterStatus() *cobra.Command { cmd := &cobra.Command{} - var clusterStatusReq compute.ClusterStatusRequest + var clusterStatusReq compute.ClusterStatus // TODO: short flags @@ -124,21 +116,13 @@ func newClusterStatus() *cobra.Command { cmd.Short = `Get status.` cmd.Long = `Get status. - Get the status of libraries on a cluster. A status will be available for all - libraries installed on this cluster via the API or the libraries UI as well as - libraries set to be installed on all clusters via the libraries UI. The order - of returned libraries will be as follows. - - 1. Libraries set to be installed on this cluster will be returned first. - Within this group, the final order will be order in which the libraries were - added to the cluster. - - 2. Libraries set to be installed on all clusters are returned next. Within - this group there is no order guarantee. - - 3. Libraries that were previously requested on this cluster or on all - clusters, but now marked for removal. Within this group there is no order - guarantee. + Get the status of libraries on a cluster. 
A status is returned for all + libraries installed on this cluster via the API or the libraries UI. The order + of returned libraries is as follows: 1. Libraries set to be installed on this + cluster, in the order that the libraries were added to the cluster, are + returned first. 2. Libraries that were previously requested to be installed on + this cluster or, but are now marked for removal, in no particular order, are + returned last. Arguments: CLUSTER_ID: Unique identifier of the cluster whose status should be retrieved.` @@ -146,7 +130,7 @@ func newClusterStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -157,11 +141,8 @@ func newClusterStatus() *cobra.Command { clusterStatusReq.ClusterId = args[0] - response, err := w.Libraries.ClusterStatusAll(ctx, clusterStatusReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Libraries.ClusterStatus(ctx, clusterStatusReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -176,12 +157,6 @@ func newClusterStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newClusterStatus()) - }) -} - // start install command // Slice with functions to override default command behavior. @@ -204,12 +179,8 @@ func newInstall() *cobra.Command { cmd.Short = `Add a library.` cmd.Long = `Add a library. - Add libraries to be installed on a cluster. The installation is asynchronous; - it happens in the background after the completion of this request. - - **Note**: The actual set of libraries to be installed on a cluster is the - union of the libraries specified via this method and the libraries set to be - installed on all clusters via the libraries UI.` + Add libraries to install on a cluster. The installation is asynchronous; it + happens in the background after the completion of this request.` cmd.Annotations = make(map[string]string) @@ -246,12 +217,6 @@ func newInstall() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newInstall()) - }) -} - // start uninstall command // Slice with functions to override default command behavior. @@ -274,9 +239,9 @@ func newUninstall() *cobra.Command { cmd.Short = `Uninstall libraries.` cmd.Long = `Uninstall libraries. - Set libraries to be uninstalled on a cluster. The libraries won't be - uninstalled until the cluster is restarted. Uninstalling libraries that are - not installed on the cluster will have no impact but is not an error.` + Set libraries to uninstall from a cluster. The libraries won't be uninstalled + until the cluster is restarted. 
A request to uninstall a library that is not + currently installed is ignored.` cmd.Annotations = make(map[string]string) @@ -313,10 +278,4 @@ func newUninstall() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUninstall()) - }) -} - // end service Libraries diff --git a/cmd/workspace/metastores/metastores.go b/cmd/workspace/metastores/metastores.go index a0e03ad0d..dd40bf92b 100755 --- a/cmd/workspace/metastores/metastores.go +++ b/cmd/workspace/metastores/metastores.go @@ -39,6 +39,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newAssign()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newCurrent()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSummary()) + cmd.AddCommand(newUnassign()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateAssignment()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -82,13 +94,13 @@ func newAssign() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only WORKSPACE_ID as positional arguments. Provide 'metastore_id', 'default_catalog_name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -133,12 +145,6 @@ func newAssign() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newAssign()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -177,13 +183,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -221,12 +227,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start current command // Slice with functions to override default command behavior. @@ -269,12 +269,6 @@ func newCurrent() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCurrent()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -347,12 +341,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -424,12 +412,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
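
The "// Add methods" block above (and the matching blocks for libraries, model-registry, and the other services below) replaces the old per-command init() registration: subcommands are now attached explicitly inside New(), and the cmdOverrides slice is left for manual customization only. A schematic sketch of that shape, with placeholder command names:

    package example

    import "github.com/spf13/cobra"

    // Manual customizations still hook in via cmdOverrides from override.go files.
    var cmdOverrides []func(*cobra.Command)

    func New() *cobra.Command {
    	cmd := &cobra.Command{Use: "example-service"}

    	// Generated subcommands are wired up directly here instead of each
    	// command appending itself to cmdOverrides from its own init().
    	cmd.AddCommand(newCreate())
    	cmd.AddCommand(newDelete())
    	cmd.AddCommand(newList())

    	for _, fn := range cmdOverrides {
    		fn(cmd)
    	}
    	return cmd
    }

    func newCreate() *cobra.Command { return &cobra.Command{Use: "create"} }
    func newDelete() *cobra.Command { return &cobra.Command{Use: "delete"} }
    func newList() *cobra.Command   { return &cobra.Command{Use: "list"} }
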
@@ -455,11 +437,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Metastores.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Metastores.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -474,12 +453,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start summary command // Slice with functions to override default command behavior. @@ -523,12 +496,6 @@ func newSummary() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSummary()) - }) -} - // start unassign command // Slice with functions to override default command behavior. @@ -558,7 +525,7 @@ func newUnassign() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -592,12 +559,6 @@ func newUnassign() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUnassign()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -619,7 +580,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.DeltaSharingOrganizationName, "delta-sharing-organization-name", updateReq.DeltaSharingOrganizationName, `The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name.`) cmd.Flags().Int64Var(&updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, "delta-sharing-recipient-token-lifetime-in-seconds", updateReq.DeltaSharingRecipientTokenLifetimeInSeconds, `The lifetime of delta sharing recipient token in seconds.`) cmd.Flags().Var(&updateReq.DeltaSharingScope, "delta-sharing-scope", `The scope of Delta Sharing enabled for the metastore. Supported values: [INTERNAL, INTERNAL_AND_EXTERNAL]`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The user-specified name of the metastore.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the metastore.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The owner of the metastore.`) cmd.Flags().StringVar(&updateReq.PrivilegeModelVersion, "privilege-model-version", updateReq.PrivilegeModelVersion, `Privilege model version of the metastore, of the form major.minor (e.g., 1.0).`) @@ -687,12 +647,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-assignment command // Slice with functions to override default command behavior. 
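
The metastores list change above is part of the broader switch in this diff from the SDK's buffering ...All helpers to paging iterators, which cmdio.RenderIterator streams instead of materializing. A condensed sketch of the new command body; w.Metastores.List and cmdio.RenderIterator are used as in the hunk, the function wrapper is illustrative.

    package example

    import (
    	"github.com/databricks/cli/cmd/root"
    	"github.com/databricks/cli/libs/cmdio"
    	"github.com/spf13/cobra"
    )

    // runList mirrors the new generated RunE body: the SDK call returns a paging
    // iterator and cmdio.RenderIterator streams it, where the old ListAll-style
    // call buffered every page and returned (slice, error).
    func runList(cmd *cobra.Command, args []string) error {
    	ctx := cmd.Context()
    	w := root.WorkspaceClient(ctx)

    	// Errors now surface while iterating rather than from the initial call.
    	it := w.Metastores.List(ctx)
    	return cmdio.RenderIterator(ctx, it)
    }
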
@@ -780,10 +734,4 @@ func newUpdateAssignment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateAssignment()) - }) -} - // end service Metastores diff --git a/cmd/workspace/metastores/overrides.go b/cmd/workspace/metastores/overrides.go index 2c9ca6f79..3ee6a1071 100644 --- a/cmd/workspace/metastores/overrides.go +++ b/cmd/workspace/metastores/overrides.go @@ -6,8 +6,9 @@ import ( ) func listOverride(listCmd *cobra.Command) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{"Region"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{"Region"}} {{range .}}{{.MetastoreId|green}} {{.Name|cyan}} {{.Region}} {{end}}`) } diff --git a/cmd/workspace/model-registry/model-registry.go b/cmd/workspace/model-registry/model-registry.go index fade898ec..41f06ac4d 100755 --- a/cmd/workspace/model-registry/model-registry.go +++ b/cmd/workspace/model-registry/model-registry.go @@ -34,6 +34,44 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newApproveTransitionRequest()) + cmd.AddCommand(newCreateComment()) + cmd.AddCommand(newCreateModel()) + cmd.AddCommand(newCreateModelVersion()) + cmd.AddCommand(newCreateTransitionRequest()) + cmd.AddCommand(newCreateWebhook()) + cmd.AddCommand(newDeleteComment()) + cmd.AddCommand(newDeleteModel()) + cmd.AddCommand(newDeleteModelTag()) + cmd.AddCommand(newDeleteModelVersion()) + cmd.AddCommand(newDeleteModelVersionTag()) + cmd.AddCommand(newDeleteTransitionRequest()) + cmd.AddCommand(newDeleteWebhook()) + cmd.AddCommand(newGetLatestVersions()) + cmd.AddCommand(newGetModel()) + cmd.AddCommand(newGetModelVersion()) + cmd.AddCommand(newGetModelVersionDownloadUri()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newListModels()) + cmd.AddCommand(newListTransitionRequests()) + cmd.AddCommand(newListWebhooks()) + cmd.AddCommand(newRejectTransitionRequest()) + cmd.AddCommand(newRenameModel()) + cmd.AddCommand(newSearchModelVersions()) + cmd.AddCommand(newSearchModels()) + cmd.AddCommand(newSetModelTag()) + cmd.AddCommand(newSetModelVersionTag()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newTestRegistryWebhook()) + cmd.AddCommand(newTransitionStage()) + cmd.AddCommand(newUpdateComment()) + cmd.AddCommand(newUpdateModel()) + cmd.AddCommand(newUpdateModelVersion()) + cmd.AddCommand(newUpdatePermissions()) + cmd.AddCommand(newUpdateWebhook()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -87,13 +125,13 @@ func newApproveTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -146,12 +184,6 @@ func newApproveTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newApproveTransitionRequest()) - }) -} - // start create-comment command // Slice with functions to override default command behavior. 
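
The metastores overrides.go change above splits the table header out of the row template, so the header can be emitted once while rows are streamed from an iterator. The same two-annotation shape is sketched below for a hypothetical list command; the .Id and .Name fields are illustrative, and the snippet assumes the generated command has already initialized cmd.Annotations, as the generated files above do.

    package example

    import (
    	"github.com/databricks/cli/libs/cmdio"
    	"github.com/spf13/cobra"
    )

    // listOverride attaches a header template (rendered once) and a row template
    // (applied per element) to a generated list command.
    func listOverride(listCmd *cobra.Command) {
    	listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
    	{{header "ID"}}	{{header "Name"}}`)
    	listCmd.Annotations["template"] = cmdio.Heredoc(`
    	{{range .}}{{.Id|green}}	{{.Name|cyan}}
    	{{end}}`)
    }
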
@@ -187,13 +219,13 @@ func newCreateComment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'comment' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -237,12 +269,6 @@ func newCreateComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateComment()) - }) -} - // start create-model command // Slice with functions to override default command behavior. @@ -280,13 +306,13 @@ func newCreateModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -324,12 +350,6 @@ func newCreateModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateModel()) - }) -} - // start create-model-version command // Slice with functions to override default command behavior. @@ -367,13 +387,13 @@ func newCreateModelVersion() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'source' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -414,12 +434,6 @@ func newCreateModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateModelVersion()) - }) -} - // start create-transition-request command // Slice with functions to override default command behavior. @@ -463,13 +477,13 @@ func newCreateTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -516,12 +530,6 @@ func newCreateTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateTransitionRequest()) - }) -} - // start create-webhook command // Slice with functions to override default command behavior. @@ -589,12 +597,6 @@ func newCreateWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateWebhook()) - }) -} - // start delete-comment command // Slice with functions to override default command behavior. 
@@ -620,7 +622,7 @@ func newDeleteComment() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -650,12 +652,6 @@ func newDeleteComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteComment()) - }) -} - // start delete-model command // Slice with functions to override default command behavior. @@ -684,7 +680,7 @@ func newDeleteModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -714,12 +710,6 @@ func newDeleteModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModel()) - }) -} - // start delete-model-tag command // Slice with functions to override default command behavior. @@ -750,7 +740,7 @@ func newDeleteModelTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -781,12 +771,6 @@ func newDeleteModelTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelTag()) - }) -} - // start delete-model-version command // Slice with functions to override default command behavior. @@ -816,7 +800,7 @@ func newDeleteModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -847,12 +831,6 @@ func newDeleteModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelVersion()) - }) -} - // start delete-model-version-tag command // Slice with functions to override default command behavior. @@ -884,7 +862,7 @@ func newDeleteModelVersionTag() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -916,12 +894,6 @@ func newDeleteModelVersionTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteModelVersionTag()) - }) -} - // start delete-transition-request command // Slice with functions to override default command behavior. @@ -965,7 +937,7 @@ func newDeleteTransitionRequest() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -1001,12 +973,6 @@ func newDeleteTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteTransitionRequest()) - }) -} - // start delete-webhook command // Slice with functions to override default command behavior. 
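
Almost every hunk in this file swaps cobra.ExactArgs for root.ExactArgs. Any such helper only needs to satisfy cobra's PositionalArgs contract; the sketch below shows one hypothetical wrapper of that shape so the substitution is easy to follow. It is not the actual root.ExactArgs implementation, just an illustration of why the two are drop-in compatible.

    package example

    import (
    	"fmt"

    	"github.com/spf13/cobra"
    )

    // exactArgs is an illustrative cobra.PositionalArgs validator of the kind a
    // CLI can use in place of cobra.ExactArgs to control the error wording.
    func exactArgs(n int) cobra.PositionalArgs {
    	return func(cmd *cobra.Command, args []string) error {
    		if len(args) != n {
    			return fmt.Errorf("%q accepts %d argument(s), received %d", cmd.CommandPath(), n, len(args))
    		}
    		return nil
    	}
    }
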
@@ -1036,7 +1002,7 @@ func newDeleteWebhook() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1064,12 +1030,6 @@ func newDeleteWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteWebhook()) - }) -} - // start get-latest-versions command // Slice with functions to override default command behavior. @@ -1103,13 +1063,13 @@ func newGetLatestVersions() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1128,11 +1088,8 @@ func newGetLatestVersions() *cobra.Command { getLatestVersionsReq.Name = args[0] } - response, err := w.ModelRegistry.GetLatestVersionsAll(ctx, getLatestVersionsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.GetLatestVersions(ctx, getLatestVersionsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1147,12 +1104,6 @@ func newGetLatestVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetLatestVersions()) - }) -} - // start get-model command // Slice with functions to override default command behavior. @@ -1185,7 +1136,7 @@ func newGetModel() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1215,12 +1166,6 @@ func newGetModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModel()) - }) -} - // start get-model-version command // Slice with functions to override default command behavior. @@ -1250,7 +1195,7 @@ func newGetModelVersion() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1281,12 +1226,6 @@ func newGetModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModelVersion()) - }) -} - // start get-model-version-download-uri command // Slice with functions to override default command behavior. @@ -1316,7 +1255,7 @@ func newGetModelVersionDownloadUri() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1347,12 +1286,6 @@ func newGetModelVersionDownloadUri() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetModelVersionDownloadUri()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
@@ -1381,7 +1314,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1411,12 +1344,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -1446,7 +1373,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1476,12 +1403,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list-models command // Slice with functions to override default command behavior. @@ -1511,7 +1432,7 @@ func newListModels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1520,11 +1441,8 @@ func newListModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.ListModelsAll(ctx, listModelsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListModels(ctx, listModelsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1539,12 +1457,6 @@ func newListModels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListModels()) - }) -} - // start list-transition-requests command // Slice with functions to override default command behavior. @@ -1574,7 +1486,7 @@ func newListTransitionRequests() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -1586,11 +1498,8 @@ func newListTransitionRequests() *cobra.Command { listTransitionRequestsReq.Name = args[0] listTransitionRequestsReq.Version = args[1] - response, err := w.ModelRegistry.ListTransitionRequestsAll(ctx, listTransitionRequestsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListTransitionRequests(ctx, listTransitionRequestsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1605,12 +1514,6 @@ func newListTransitionRequests() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListTransitionRequests()) - }) -} - // start list-webhooks command // Slice with functions to override default command behavior. 
@@ -1642,7 +1545,7 @@ func newListWebhooks() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1651,11 +1554,8 @@ func newListWebhooks() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.ListWebhooksAll(ctx, listWebhooksReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.ListWebhooks(ctx, listWebhooksReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1670,12 +1570,6 @@ func newListWebhooks() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListWebhooks()) - }) -} - // start reject-transition-request command // Slice with functions to override default command behavior. @@ -1719,13 +1613,13 @@ func newRejectTransitionRequest() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -1772,12 +1666,6 @@ func newRejectTransitionRequest() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRejectTransitionRequest()) - }) -} - // start rename-model command // Slice with functions to override default command behavior. @@ -1811,13 +1699,13 @@ func newRenameModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1855,12 +1743,6 @@ func newRenameModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRenameModel()) - }) -} - // start search-model-versions command // Slice with functions to override default command behavior. @@ -1891,7 +1773,7 @@ func newSearchModelVersions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1900,11 +1782,8 @@ func newSearchModelVersions() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.SearchModelVersionsAll(ctx, searchModelVersionsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.SearchModelVersions(ctx, searchModelVersionsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -1919,12 +1798,6 @@ func newSearchModelVersions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchModelVersions()) - }) -} - // start search-models command // Slice with functions to override default command behavior. @@ -1955,7 +1828,7 @@ func newSearchModels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -1964,11 +1837,8 @@ func newSearchModels() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ModelRegistry.SearchModelsAll(ctx, searchModelsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelRegistry.SearchModels(ctx, searchModelsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -1983,12 +1853,6 @@ func newSearchModels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSearchModels()) - }) -} - // start set-model-tag command // Slice with functions to override default command behavior. @@ -2027,13 +1891,13 @@ func newSetModelTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -2077,12 +1941,6 @@ func newSetModelTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetModelTag()) - }) -} - // start set-model-version-tag command // Slice with functions to override default command behavior. @@ -2122,13 +1980,13 @@ func newSetModelVersionTag() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'key', 'value' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -2175,12 +2033,6 @@ func newSetModelVersionTag() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetModelVersionTag()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -2214,7 +2066,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2250,12 +2102,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start test-registry-webhook command // Slice with functions to override default command behavior. 
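
search-models, like the other listing commands in this file, now hands cmdio an iterator instead of a pre-fetched slice. Outside the CLI the same iterator can be drained manually; the sketch below assumes the SDK's ml package, an ml.SearchModelsRequest type, an element type exposing Name, and the HasNext/Next iterator shape of recent databricks-sdk-go versions, so treat it as an approximation rather than a verified API reference.

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/databricks/databricks-sdk-go"
    	"github.com/databricks/databricks-sdk-go/service/ml"
    )

    func main() {
    	ctx := context.Background()
    	w := databricks.Must(databricks.NewWorkspaceClient())

    	// SearchModels returns a paging iterator; pages are fetched lazily.
    	it := w.ModelRegistry.SearchModels(ctx, ml.SearchModelsRequest{})
    	for it.HasNext(ctx) {
    		m, err := it.Next(ctx)
    		if err != nil {
    			panic(err)
    		}
    		fmt.Println(m.Name)
    	}
    }
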
@@ -2304,13 +2150,13 @@ func newTestRegistryWebhook() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2348,12 +2194,6 @@ func newTestRegistryWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newTestRegistryWebhook()) - }) -} - // start transition-stage command // Slice with functions to override default command behavior. @@ -2403,13 +2243,13 @@ func newTransitionStage() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'version', 'stage', 'archive_existing_versions' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -2462,12 +2302,6 @@ func newTransitionStage() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newTransitionStage()) - }) -} - // start update-comment command // Slice with functions to override default command behavior. @@ -2500,13 +2334,13 @@ func newUpdateComment() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id', 'comment' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2547,12 +2381,6 @@ func newUpdateComment() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateComment()) - }) -} - // start update-model command // Slice with functions to override default command behavior. @@ -2586,13 +2414,13 @@ func newUpdateModel() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2630,12 +2458,6 @@ func newUpdateModel() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateModel()) - }) -} - // start update-model-version command // Slice with functions to override default command behavior. @@ -2670,13 +2492,13 @@ func newUpdateModelVersion() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'version' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -2717,12 +2539,6 @@ func newUpdateModelVersion() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateModelVersion()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -2756,7 +2572,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2792,12 +2608,6 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // start update-webhook command // Slice with functions to override default command behavior. @@ -2837,13 +2647,13 @@ func newUpdateWebhook() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'id' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -2881,10 +2691,4 @@ func newUpdateWebhook() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateWebhook()) - }) -} - // end service ModelRegistry diff --git a/cmd/workspace/model-versions/model-versions.go b/cmd/workspace/model-versions/model-versions.go index 97438264e..034cea2df 100755 --- a/cmd/workspace/model-versions/model-versions.go +++ b/cmd/workspace/model-versions/model-versions.go @@ -33,6 +33,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetByAlias()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -76,7 +83,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -110,12 +117,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -132,6 +133,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include model versions in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME VERSION" cmd.Short = `Get a Model Version.` cmd.Long = `Get a Model Version. 
@@ -150,7 +153,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -184,12 +187,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-by-alias command // Slice with functions to override default command behavior. @@ -224,7 +221,7 @@ func newGetByAlias() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -255,12 +252,6 @@ func newGetByAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetByAlias()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -277,6 +268,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include model versions in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of model versions to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -296,6 +288,7 @@ func newList() *cobra.Command { schema. There is no guarantee of a specific ordering of the elements in the response. + The elements in the response will not contain any aliases or tags. Arguments: FULL_NAME: The full three-level name of the registered model under which to list @@ -304,7 +297,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -315,11 +308,8 @@ func newList() *cobra.Command { listReq.FullName = args[0] - response, err := w.ModelVersions.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ModelVersions.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -334,12 +324,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -380,7 +364,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -420,10 +404,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ModelVersions diff --git a/cmd/workspace/online-tables/online-tables.go b/cmd/workspace/online-tables/online-tables.go new file mode 100755 index 000000000..da2f8c041 --- /dev/null +++ b/cmd/workspace/online-tables/online-tables.go @@ -0,0 +1,225 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +package online_tables + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "online-tables", + Short: `Online tables provide lower latency and higher QPS access to data from Delta tables.`, + Long: `Online tables provide lower latency and higher QPS access to data from Delta + tables.`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *catalog.CreateOnlineTableRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq catalog.CreateOnlineTableRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.Name, "name", createReq.Name, `Full three-part (catalog, schema, table) name of the table.`) + // TODO: complex arg: spec + + cmd.Use = "create" + cmd.Short = `Create an Online Table.` + cmd.Long = `Create an Online Table. + + Create a new Online Table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + + response, err := w.OnlineTables.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteOnlineTableRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteOnlineTableRequest + + // TODO: short flags + + cmd.Use = "delete NAME" + cmd.Short = `Delete an Online Table.` + cmd.Long = `Delete an Online Table. + + Delete an online table. Warning: This will delete all the data in the online + table. If the source Delta table was deleted or modified since this Online + Table was created, this will lose the data forever! 
+ + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Name = args[0] + + err = w.OnlineTables.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *catalog.GetOnlineTableRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetOnlineTableRequest + + // TODO: short flags + + cmd.Use = "get NAME" + cmd.Short = `Get an Online Table.` + cmd.Long = `Get an Online Table. + + Get information about an existing online table and its status. + + Arguments: + NAME: Full three-part (catalog, schema, table) name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Name = args[0] + + response, err := w.OnlineTables.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// end service OnlineTables diff --git a/cmd/workspace/permission-migration/permission-migration.go b/cmd/workspace/permission-migration/permission-migration.go new file mode 100755 index 000000000..40d3f9a3b --- /dev/null +++ b/cmd/workspace/permission-migration/permission-migration.go @@ -0,0 +1,136 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package permission_migration + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
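
The new online-tables commands above wrap three workspace-client calls; used directly, they look roughly like the sketch below. Request types and the Name field come from the generated file above, while the three-part table name is a placeholder and the client setup assumes a matching SDK version.

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/databricks/databricks-sdk-go"
    	"github.com/databricks/databricks-sdk-go/service/catalog"
    )

    func main() {
    	ctx := context.Background()
    	w := databricks.Must(databricks.NewWorkspaceClient())

    	// Check the status of an existing online table (placeholder three-part name).
    	ot, err := w.OnlineTables.Get(ctx, catalog.GetOnlineTableRequest{
    		Name: "main.default.my_online_table",
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", ot)

    	// Deleting the online table also deletes its data, as the help text warns.
    	err = w.OnlineTables.Delete(ctx, catalog.DeleteOnlineTableRequest{
    		Name: "main.default.my_online_table",
    	})
    	if err != nil {
    		panic(err)
    	}
    }
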
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "permission-migration", + Short: `This spec contains undocumented permission migration APIs used in https://github.com/databrickslabs/ucx.`, + Long: `This spec contains undocumented permission migration APIs used in + https://github.com/databrickslabs/ucx.`, + GroupID: "iam", + Annotations: map[string]string{ + "package": "iam", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newMigratePermissions()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start migrate-permissions command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var migratePermissionsOverrides []func( + *cobra.Command, + *iam.PermissionMigrationRequest, +) + +func newMigratePermissions() *cobra.Command { + cmd := &cobra.Command{} + + var migratePermissionsReq iam.PermissionMigrationRequest + var migratePermissionsJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&migratePermissionsJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&migratePermissionsReq.Size, "size", migratePermissionsReq.Size, `The maximum number of permissions that will be migrated.`) + + cmd.Use = "migrate-permissions WORKSPACE_ID FROM_WORKSPACE_GROUP_NAME TO_ACCOUNT_GROUP_NAME" + cmd.Short = `Migrate Permissions.` + cmd.Long = `Migrate Permissions. + + Migrate a batch of permissions from a workspace local group to an account + group. + + Arguments: + WORKSPACE_ID: WorkspaceId of the associated workspace where the permission migration + will occur. Both workspace group and account group must be in this + workspace. + FROM_WORKSPACE_GROUP_NAME: The name of the workspace group that permissions will be migrated from. + TO_ACCOUNT_GROUP_NAME: The name of the account group that permissions will be migrated to.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'workspace_id', 'from_workspace_group_name', 'to_account_group_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = migratePermissionsJson.Unmarshal(&migratePermissionsReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[0], &migratePermissionsReq.WorkspaceId) + if err != nil { + return fmt.Errorf("invalid WORKSPACE_ID: %s", args[0]) + } + } + if !cmd.Flags().Changed("json") { + migratePermissionsReq.FromWorkspaceGroupName = args[1] + } + if !cmd.Flags().Changed("json") { + migratePermissionsReq.ToAccountGroupName = args[2] + } + + response, err := w.PermissionMigration.MigratePermissions(ctx, migratePermissionsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
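
Because WORKSPACE_ID is numeric, the generated migrate-permissions command above parses it with fmt.Sscan before filling the request; calling the endpoint through the SDK skips that string round trip. The request type and field names come from the hunk above, the group names and batch size are placeholders.

    package main

    import (
    	"context"
    	"fmt"

    	"github.com/databricks/databricks-sdk-go"
    	"github.com/databricks/databricks-sdk-go/service/iam"
    )

    func main() {
    	ctx := context.Background()
    	w := databricks.Must(databricks.NewWorkspaceClient())

    	resp, err := w.PermissionMigration.MigratePermissions(ctx, iam.PermissionMigrationRequest{
    		WorkspaceId:            1234567890,               // numeric ID, no string parsing needed
    		FromWorkspaceGroupName: "data-engineers",         // placeholder workspace-local group
    		ToAccountGroupName:     "account-data-engineers", // placeholder account group
    		Size:                   100,                      // optional batch size, as exposed by --size
    	})
    	if err != nil {
    		panic(err)
    	}
    	fmt.Printf("%+v\n", resp)
    }
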
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range migratePermissionsOverrides { + fn(cmd, &migratePermissionsReq) + } + + return cmd +} + +// end service PermissionMigration diff --git a/cmd/workspace/permissions/permissions.go b/cmd/workspace/permissions/permissions.go index 8aeb3fc73..57a7d1e5e 100755 --- a/cmd/workspace/permissions/permissions.go +++ b/cmd/workspace/permissions/permissions.go @@ -64,6 +64,9 @@ func New() *cobra.Command { For the mapping of the required permissions for specific actions or abilities and other important information, see [Access Control]. + Note that to manage access control on service principals, use **[Account + Access Control Proxy](:service:accountaccesscontrolproxy)**. + [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html`, GroupID: "iam", Annotations: map[string]string{ @@ -71,6 +74,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newSet()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -106,13 +115,13 @@ func newGet() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -143,12 +152,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -178,7 +181,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -209,12 +212,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start set command // Slice with functions to override default command behavior. @@ -246,13 +243,13 @@ func newSet() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -289,12 +286,6 @@ func newSet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSet()) - }) -} - // start update command // Slice with functions to override default command behavior. 
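For the migrate-permissions command added earlier in this diff, the --json flag accepts the same fields as the positional arguments. A minimal sketch of building that request body from the SDK type wired into the command above; the snake_case output relies on the SDK structs' usual JSON tags, and all values are illustrative:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/iam"
)

func main() {
	// Same fields the command reads from WORKSPACE_ID, FROM_WORKSPACE_GROUP_NAME,
	// TO_ACCOUNT_GROUP_NAME and the --size flag; values here are made up.
	req := iam.PermissionMigrationRequest{
		WorkspaceId:            1234567890,
		FromWorkspaceGroupName: "old-workspace-group",
		ToAccountGroupName:     "new-account-group",
		Size:                   100,
	}
	b, err := json.MarshalIndent(req, "", "  ")
	if err != nil {
		panic(err)
	}
	// Pass the result inline via --json '<...>' or from a file via --json @request.json,
	// per the flag help text above.
	fmt.Println(string(b))
}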
@@ -326,13 +317,13 @@ func newUpdate() *cobra.Command { REQUEST_OBJECT_TYPE: The type of the request object. Can be one of the following: authorization, clusters, cluster-policies, directories, experiments, files, instance-pools, jobs, notebooks, pipelines, registered-models, - repos, serving-endpoints, or sql-warehouses. + repos, serving-endpoints, or warehouses. REQUEST_OBJECT_ID: The id of the request object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -369,10 +360,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Permissions diff --git a/cmd/workspace/pipelines/pipelines.go b/cmd/workspace/pipelines/pipelines.go index d35eb3cd8..f1cc4e3f7 100755 --- a/cmd/workspace/pipelines/pipelines.go +++ b/cmd/workspace/pipelines/pipelines.go @@ -41,6 +41,22 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetUpdate()) + cmd.AddCommand(newListPipelineEvents()) + cmd.AddCommand(newListPipelines()) + cmd.AddCommand(newListUpdates()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newStartUpdate()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -109,12 +125,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -182,12 +192,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -258,12 +262,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -334,12 +332,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -411,12 +403,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-update command // Slice with functions to override default command behavior. 
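The func init() registrations removed in the hunks above were the generated way of attaching subcommands; the override slices remain the hook for manual customization from a hand-written override.go in the command's directory, as the generated comments note. A minimal sketch using the permission-migration package introduced in this diff; the alias and example text are hypothetical:

// override.go (manually curated, hypothetical) in cmd/workspace/permission-migration/
package permission_migration

import (
	"github.com/spf13/cobra"

	"github.com/databricks/databricks-sdk-go/service/iam"
)

func init() {
	// Adjust the generated top-level command.
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Aliases = append(cmd.Aliases, "perm-migration")
	})

	// Adjust the generated migrate-permissions subcommand; the request struct is
	// passed in so defaults could be pre-set here as well.
	migratePermissionsOverrides = append(migratePermissionsOverrides, func(cmd *cobra.Command, req *iam.PermissionMigrationRequest) {
		cmd.Example = "  databricks permission-migration migrate-permissions 1234 old-group new-group"
	})
}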
@@ -446,7 +432,7 @@ func newGetUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -477,12 +463,6 @@ func newGetUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetUpdate()) - }) -} - // start list-pipeline-events command // Slice with functions to override default command behavior. @@ -536,11 +516,8 @@ func newListPipelineEvents() *cobra.Command { } listPipelineEventsReq.PipelineId = args[0] - response, err := w.Pipelines.ListPipelineEventsAll(ctx, listPipelineEventsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Pipelines.ListPipelineEvents(ctx, listPipelineEventsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -555,12 +532,6 @@ func newListPipelineEvents() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPipelineEvents()) - }) -} - // start list-pipelines command // Slice with functions to override default command behavior. @@ -591,7 +562,7 @@ func newListPipelines() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -600,11 +571,8 @@ func newListPipelines() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Pipelines.ListPipelinesAll(ctx, listPipelinesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Pipelines.ListPipelines(ctx, listPipelinesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -619,12 +587,6 @@ func newListPipelines() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListPipelines()) - }) -} - // start list-updates command // Slice with functions to override default command behavior. @@ -699,102 +661,6 @@ func newListUpdates() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListUpdates()) - }) -} - -// start reset command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var resetOverrides []func( - *cobra.Command, - *pipelines.ResetRequest, -) - -func newReset() *cobra.Command { - cmd := &cobra.Command{} - - var resetReq pipelines.ResetRequest - - var resetSkipWait bool - var resetTimeout time.Duration - - cmd.Flags().BoolVar(&resetSkipWait, "no-wait", resetSkipWait, `do not wait to reach RUNNING state`) - cmd.Flags().DurationVar(&resetTimeout, "timeout", 20*time.Minute, `maximum amount of time to reach RUNNING state`) - // TODO: short flags - - cmd.Use = "reset PIPELINE_ID" - cmd.Short = `Reset a pipeline.` - cmd.Long = `Reset a pipeline. 
- - Resets a pipeline.` - - cmd.Annotations = make(map[string]string) - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if len(args) == 0 { - promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No PIPELINE_ID argument specified. Loading names for Pipelines drop-down." - names, err := w.Pipelines.PipelineStateInfoNameToPipelineIdMap(ctx, pipelines.ListPipelinesRequest{}) - close(promptSpinner) - if err != nil { - return fmt.Errorf("failed to load names for Pipelines drop-down. Please manually specify required arguments. Original error: %w", err) - } - id, err := cmdio.Select(ctx, names, "") - if err != nil { - return err - } - args = append(args, id) - } - if len(args) != 1 { - return fmt.Errorf("expected to have ") - } - resetReq.PipelineId = args[0] - - wait, err := w.Pipelines.Reset(ctx, resetReq) - if err != nil { - return err - } - if resetSkipWait { - return nil - } - spinner := cmdio.Spinner(ctx) - info, err := wait.OnProgress(func(i *pipelines.GetPipelineResponse) { - statusMessage := i.Cause - spinner <- statusMessage - }).GetWithTimeout(resetTimeout) - close(spinner) - if err != nil { - return err - } - return cmdio.Render(ctx, info) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range resetOverrides { - fn(cmd, &resetReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReset()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -876,12 +742,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start start-update command // Slice with functions to override default command behavior. @@ -911,6 +771,7 @@ func newStartUpdate() *cobra.Command { cmd.Flags().BoolVar(&startUpdateReq.FullRefresh, "full-refresh", startUpdateReq.FullRefresh, `If true, this update will reset all tables before running.`) // TODO: array: full_refresh_selection // TODO: array: refresh_selection + cmd.Flags().BoolVar(&startUpdateReq.ValidateOnly, "validate-only", startUpdateReq.ValidateOnly, `If true, this update only validates the correctness of pipeline source code but does not materialize or publish any datasets.`) cmd.Use = "start-update PIPELINE_ID" cmd.Short = `Start a pipeline.` @@ -970,12 +831,6 @@ func newStartUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStartUpdate()) - }) -} - // start stop command // Slice with functions to override default command behavior. @@ -1061,12 +916,6 @@ func newStop() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStop()) - }) -} - // start update command // Slice with functions to override default command behavior. 
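The new --validate-only flag on start-update above maps to a boolean on the start-update request. A hedged sketch of the equivalent direct SDK call; the pipelines.StartUpdate type name, the UpdateId response field, and the client construction are assumed from the flag wiring and are not part of this diff:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Validate pipeline source code without materializing or publishing datasets,
	// mirroring `databricks pipelines start-update <id> --validate-only`.
	resp, err := w.Pipelines.StartUpdate(ctx, pipelines.StartUpdate{
		PipelineId:   "0123456789abcdef", // illustrative pipeline ID
		ValidateOnly: true,
	})
	if err != nil {
		panic(err)
	}
	fmt.Println("started update:", resp.UpdateId)
}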
@@ -1091,11 +940,14 @@ func newUpdate() *cobra.Command { // TODO: array: clusters // TODO: map via StringToStringVar: configuration cmd.Flags().BoolVar(&updateReq.Continuous, "continuous", updateReq.Continuous, `Whether the pipeline is continuous or triggered.`) + // TODO: complex arg: deployment cmd.Flags().BoolVar(&updateReq.Development, "development", updateReq.Development, `Whether the pipeline is in Development mode.`) cmd.Flags().StringVar(&updateReq.Edition, "edition", updateReq.Edition, `Pipeline product edition.`) cmd.Flags().Int64Var(&updateReq.ExpectedLastModified, "expected-last-modified", updateReq.ExpectedLastModified, `If present, the last-modified time of the pipeline settings before the edit.`) // TODO: complex arg: filters + // TODO: complex arg: gateway_definition cmd.Flags().StringVar(&updateReq.Id, "id", updateReq.Id, `Unique identifier for this pipeline.`) + // TODO: complex arg: ingestion_definition // TODO: array: libraries cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Friendly identifier for this pipeline.`) // TODO: array: notifications @@ -1166,12 +1018,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1253,10 +1099,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Pipelines diff --git a/cmd/workspace/policy-families/policy-families.go b/cmd/workspace/policy-families/policy-families.go index 75ab862a7..beee6e963 100755 --- a/cmd/workspace/policy-families/policy-families.go +++ b/cmd/workspace/policy-families/policy-families.go @@ -32,6 +32,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -65,7 +69,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -95,12 +99,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -129,7 +127,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -138,11 +136,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.PolicyFamilies.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.PolicyFamilies.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
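The list hunks here (policy-families) and elsewhere in this diff replace the eager ListAll helpers with paginating iterators rendered through cmdio.RenderIterator. A rough sketch of consuming such an iterator directly, assuming the databricks-sdk-go listing iterator exposes HasNext/Next and that the request type is compute.ListPolicyFamiliesRequest (neither is shown in this diff):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())

	// Pages are fetched lazily as the iterator is advanced.
	it := w.PolicyFamilies.List(ctx, compute.ListPolicyFamiliesRequest{})
	for it.HasNext(ctx) {
		family, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Println(family.Name)
	}
}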
@@ -157,10 +152,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service PolicyFamilies diff --git a/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go new file mode 100755 index 000000000..4ab36b5d0 --- /dev/null +++ b/cmd/workspace/provider-exchange-filters/provider-exchange-filters.go @@ -0,0 +1,293 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_exchange_filters + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-exchange-filters", + Short: `Marketplace exchanges filters curate which groups can access an exchange.`, + Long: `Marketplace exchanges filters curate which groups can access an exchange.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateExchangeFilterRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateExchangeFilterRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a new exchange filter.` + cmd.Long = `Create a new exchange filter. + + Add an exchange filter.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderExchangeFilters.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteExchangeFilterRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteExchangeFilterRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete an exchange filter.` + cmd.Long = `Delete an exchange filter. + + Delete an exchange filter` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Exchange Filters drop-down." + names, err := w.ProviderExchangeFilters.ExchangeFilterNameToIdMap(ctx, marketplace.ListExchangeFiltersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Exchange Filters drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderExchangeFilters.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListExchangeFiltersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListExchangeFiltersRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list EXCHANGE_ID" + cmd.Short = `List exchange filters.` + cmd.Long = `List exchange filters. + + List exchange filter` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listReq.ExchangeId = args[0] + + response := w.ProviderExchangeFilters.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateExchangeFilterRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateExchangeFilterRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update exchange filter.` + cmd.Long = `Update exchange filter. + + Update an exchange filter.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderExchangeFilters.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderExchangeFilters diff --git a/cmd/workspace/provider-exchanges/provider-exchanges.go b/cmd/workspace/provider-exchanges/provider-exchanges.go new file mode 100755 index 000000000..7ff73e0d1 --- /dev/null +++ b/cmd/workspace/provider-exchanges/provider-exchanges.go @@ -0,0 +1,580 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_exchanges + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-exchanges", + Short: `Marketplace exchanges allow providers to share their listings with a curated set of customers.`, + Long: `Marketplace exchanges allow providers to share their listings with a curated + set of customers.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newAddListingToExchange()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteListingFromExchange()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListExchangesForListing()) + cmd.AddCommand(newListListingsForExchange()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start add-listing-to-exchange command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var addListingToExchangeOverrides []func( + *cobra.Command, + *marketplace.AddExchangeForListingRequest, +) + +func newAddListingToExchange() *cobra.Command { + cmd := &cobra.Command{} + + var addListingToExchangeReq marketplace.AddExchangeForListingRequest + var addListingToExchangeJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&addListingToExchangeJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "add-listing-to-exchange LISTING_ID EXCHANGE_ID" + cmd.Short = `Add an exchange for listing.` + cmd.Long = `Add an exchange for listing. + + Associate an exchange with a listing` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(0)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'listing_id', 'exchange_id' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = addListingToExchangeJson.Unmarshal(&addListingToExchangeReq) + if err != nil { + return err + } + } + if !cmd.Flags().Changed("json") { + addListingToExchangeReq.ListingId = args[0] + } + if !cmd.Flags().Changed("json") { + addListingToExchangeReq.ExchangeId = args[1] + } + + response, err := w.ProviderExchanges.AddListingToExchange(ctx, addListingToExchangeReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range addListingToExchangeOverrides { + fn(cmd, &addListingToExchangeReq) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateExchangeRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateExchangeRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create an exchange.` + cmd.Long = `Create an exchange. + + Create an exchange` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderExchanges.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. 
+ cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteExchangeRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteExchangeRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete an exchange.` + cmd.Long = `Delete an exchange. + + This removes a listing from marketplace.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.Id = args[0] + + err = w.ProviderExchanges.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start delete-listing-from-exchange command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteListingFromExchangeOverrides []func( + *cobra.Command, + *marketplace.RemoveExchangeForListingRequest, +) + +func newDeleteListingFromExchange() *cobra.Command { + cmd := &cobra.Command{} + + var deleteListingFromExchangeReq marketplace.RemoveExchangeForListingRequest + + // TODO: short flags + + cmd.Use = "delete-listing-from-exchange ID" + cmd.Short = `Remove an exchange for listing.` + cmd.Long = `Remove an exchange for listing. + + Disassociate an exchange with a listing` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteListingFromExchangeReq.Id = args[0] + + err = w.ProviderExchanges.DeleteListingFromExchange(ctx, deleteListingFromExchangeReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteListingFromExchangeOverrides { + fn(cmd, &deleteListingFromExchangeReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *marketplace.GetExchangeRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetExchangeRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get an exchange.` + cmd.Long = `Get an exchange. + + Get an exchange.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.Id = args[0] + + response, err := w.ProviderExchanges.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListExchangesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListExchangesRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List exchanges.` + cmd.Long = `List exchanges. + + List exchanges visible to provider` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderExchanges.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start list-exchanges-for-listing command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listExchangesForListingOverrides []func( + *cobra.Command, + *marketplace.ListExchangesForListingRequest, +) + +func newListExchangesForListing() *cobra.Command { + cmd := &cobra.Command{} + + var listExchangesForListingReq marketplace.ListExchangesForListingRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listExchangesForListingReq.PageSize, "page-size", listExchangesForListingReq.PageSize, ``) + cmd.Flags().StringVar(&listExchangesForListingReq.PageToken, "page-token", listExchangesForListingReq.PageToken, ``) + + cmd.Use = "list-exchanges-for-listing LISTING_ID" + cmd.Short = `List exchanges for listing.` + cmd.Long = `List exchanges for listing. 
+ + List exchanges associated with a listing` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listExchangesForListingReq.ListingId = args[0] + + response := w.ProviderExchanges.ListExchangesForListing(ctx, listExchangesForListingReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listExchangesForListingOverrides { + fn(cmd, &listExchangesForListingReq) + } + + return cmd +} + +// start list-listings-for-exchange command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listListingsForExchangeOverrides []func( + *cobra.Command, + *marketplace.ListListingsForExchangeRequest, +) + +func newListListingsForExchange() *cobra.Command { + cmd := &cobra.Command{} + + var listListingsForExchangeReq marketplace.ListListingsForExchangeRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listListingsForExchangeReq.PageSize, "page-size", listListingsForExchangeReq.PageSize, ``) + cmd.Flags().StringVar(&listListingsForExchangeReq.PageToken, "page-token", listListingsForExchangeReq.PageToken, ``) + + cmd.Use = "list-listings-for-exchange EXCHANGE_ID" + cmd.Short = `List listings for exchange.` + cmd.Long = `List listings for exchange. + + List listings associated with an exchange` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listListingsForExchangeReq.ExchangeId = args[0] + + response := w.ProviderExchanges.ListListingsForExchange(ctx, listListingsForExchangeReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listListingsForExchangeOverrides { + fn(cmd, &listListingsForExchangeReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateExchangeRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateExchangeRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update exchange.` + cmd.Long = `Update exchange. 
+ + Update an exchange` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderExchanges.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderExchanges diff --git a/cmd/workspace/provider-files/provider-files.go b/cmd/workspace/provider-files/provider-files.go new file mode 100755 index 000000000..25e1addf5 --- /dev/null +++ b/cmd/workspace/provider-files/provider-files.go @@ -0,0 +1,303 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_files + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-files", + Short: `Marketplace offers a set of file APIs for various purposes such as preview notebooks and provider icons.`, + Long: `Marketplace offers a set of file APIs for various purposes such as preview + notebooks and provider icons.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateFileRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateFileRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.DisplayName, "display-name", createReq.DisplayName, ``) + + cmd.Use = "create" + cmd.Short = `Create a file.` + cmd.Long = `Create a file. + + Create a file. 
Currently, only provider icons and attached notebooks are + supported.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderFiles.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteFileRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteFileRequest + + // TODO: short flags + + cmd.Use = "delete FILE_ID" + cmd.Short = `Delete a file.` + cmd.Long = `Delete a file. + + Delete a file` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FILE_ID argument specified. Loading names for Provider Files drop-down." + names, err := w.ProviderFiles.FileInfoDisplayNameToIdMap(ctx, marketplace.ListFilesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Files drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.FileId = args[0] + + err = w.ProviderFiles.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetFileRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetFileRequest + + // TODO: short flags + + cmd.Use = "get FILE_ID" + cmd.Short = `Get a file.` + cmd.Long = `Get a file. + + Get a file` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FILE_ID argument specified. 
Loading names for Provider Files drop-down." + names, err := w.ProviderFiles.FileInfoDisplayNameToIdMap(ctx, marketplace.ListFilesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Files drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.FileId = args[0] + + response, err := w.ProviderFiles.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListFilesRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListFilesRequest + var listJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List files.` + cmd.Long = `List files. + + List files attached to a parent entity.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = listJson.Unmarshal(&listReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response := w.ProviderFiles.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// end service ProviderFiles diff --git a/cmd/workspace/provider-listings/provider-listings.go b/cmd/workspace/provider-listings/provider-listings.go new file mode 100755 index 000000000..0abdf51d8 --- /dev/null +++ b/cmd/workspace/provider-listings/provider-listings.go @@ -0,0 +1,360 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_listings + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-listings", + Short: `Listings are the core entities in the Marketplace.`, + Long: `Listings are the core entities in the Marketplace. They represent the products + that are available for consumption.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateListingRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateListingRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a listing.` + cmd.Long = `Create a listing. + + Create a new listing` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderListings.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteListingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteListingRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete a listing.` + cmd.Long = `Delete a listing. + + Delete a listing` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Listings drop-down." + names, err := w.ProviderListings.ListingSummaryNameToIdMap(ctx, marketplace.GetListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Listings drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderListings.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *marketplace.GetListingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetListingRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get a listing.` + cmd.Long = `Get a listing. + + Get a listing` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Listings drop-down." + names, err := w.ProviderListings.ListingSummaryNameToIdMap(ctx, marketplace.GetListingsRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Listings drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ProviderListings.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.GetListingsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.GetListingsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List listings.` + cmd.Long = `List listings. 
+ + List listings owned by this provider` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderListings.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateListingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateListingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update listing.` + cmd.Long = `Update listing. + + Update a listing` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderListings.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderListings diff --git a/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go new file mode 100755 index 000000000..a38d9f420 --- /dev/null +++ b/cmd/workspace/provider-personalization-requests/provider-personalization-requests.go @@ -0,0 +1,181 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_personalization_requests + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-personalization-requests", + Short: `Personalization requests are an alternate to instantly available listings.`, + Long: `Personalization requests are an alternate to instantly available listings. + Control the lifecycle of personalized solutions.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListAllPersonalizationRequestsRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListAllPersonalizationRequestsRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `All personalization requests across all listings.` + cmd.Long = `All personalization requests across all listings. + + List personalization requests to this provider. This will return all + personalization requests, regardless of which listing they are for.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderPersonalizationRequests.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *marketplace.UpdatePersonalizationRequestRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdatePersonalizationRequestRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.Reason, "reason", updateReq.Reason, ``) + // TODO: complex arg: share + + cmd.Use = "update LISTING_ID REQUEST_ID STATUS" + cmd.Short = `Update personalization request status.` + cmd.Long = `Update personalization request status. + + Update personalization request. 
This method only permits updating the status + of the request.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(2)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only LISTING_ID, REQUEST_ID as positional arguments. Provide 'status' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.ListingId = args[0] + updateReq.RequestId = args[1] + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[2], &updateReq.Status) + if err != nil { + return fmt.Errorf("invalid STATUS: %s", args[2]) + } + } + + response, err := w.ProviderPersonalizationRequests.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderPersonalizationRequests diff --git a/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go new file mode 100755 index 000000000..8cee6e4eb --- /dev/null +++ b/cmd/workspace/provider-provider-analytics-dashboards/provider-provider-analytics-dashboards.go @@ -0,0 +1,240 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_provider_analytics_dashboards + +import ( + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-provider-analytics-dashboards", + Short: `Manage templated analytics solution for providers.`, + Long: `Manage templated analytics solution for providers.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetLatestVersion()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "create" + cmd.Short = `Create provider analytics dashboard.` + cmd.Long = `Create provider analytics dashboard. + + Create provider analytics dashboard. Returns Marketplace specific id. Not to + be confused with the Lakeview dashboard id.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.Create(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get" + cmd.Short = `Get provider analytics dashboard.` + cmd.Long = `Get provider analytics dashboard. + + Get provider analytics dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.Get(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd) + } + + return cmd +} + +// start get-latest-version command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getLatestVersionOverrides []func( + *cobra.Command, +) + +func newGetLatestVersion() *cobra.Command { + cmd := &cobra.Command{} + + cmd.Use = "get-latest-version" + cmd.Short = `Get latest version of provider analytics dashboard.` + cmd.Long = `Get latest version of provider analytics dashboard. + + Get latest version of provider analytics dashboard.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + response, err := w.ProviderProviderAnalyticsDashboards.GetLatestVersion(ctx) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getLatestVersionOverrides { + fn(cmd) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateProviderAnalyticsDashboardRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateProviderAnalyticsDashboardRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().Int64Var(&updateReq.Version, "version", updateReq.Version, `this is the version of the dashboard template we want to update our user to current expectation is that it should be equal to latest version of the dashboard template.`) + + cmd.Use = "update ID" + cmd.Short = `Update provider analytics dashboard.` + cmd.Long = `Update provider analytics dashboard. + + Update provider analytics dashboard. + + Arguments: + ID: id is immutable property and can't be updated.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.Id = args[0] + + response, err := w.ProviderProviderAnalyticsDashboards.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderProviderAnalyticsDashboards diff --git a/cmd/workspace/provider-providers/provider-providers.go b/cmd/workspace/provider-providers/provider-providers.go new file mode 100755 index 000000000..b7273a344 --- /dev/null +++ b/cmd/workspace/provider-providers/provider-providers.go @@ -0,0 +1,359 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package provider_providers + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/marketplace" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "provider-providers", + Short: `Providers are entities that manage assets in Marketplace.`, + Long: `Providers are entities that manage assets in Marketplace.`, + GroupID: "marketplace", + Annotations: map[string]string{ + "package": "marketplace", + }, + + // This service is being previewed; hide from help output. + Hidden: true, + } + + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var createOverrides []func( + *cobra.Command, + *marketplace.CreateProviderRequest, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq marketplace.CreateProviderRequest + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "create" + cmd.Short = `Create a provider.` + cmd.Long = `Create a provider. + + Create a provider` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.ProviderProviders.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *marketplace.DeleteProviderRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq marketplace.DeleteProviderRequest + + // TODO: short flags + + cmd.Use = "delete ID" + cmd.Short = `Delete provider.` + cmd.Long = `Delete provider. + + Delete provider` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Providers drop-down." + names, err := w.ProviderProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + deleteReq.Id = args[0] + + err = w.ProviderProviders.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var getOverrides []func( + *cobra.Command, + *marketplace.GetProviderRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq marketplace.GetProviderRequest + + // TODO: short flags + + cmd.Use = "get ID" + cmd.Short = `Get provider.` + cmd.Long = `Get provider. + + Get provider profile` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No ID argument specified. Loading names for Provider Providers drop-down." + names, err := w.ProviderProviders.ProviderInfoNameToIdMap(ctx, marketplace.ListProvidersRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Provider Providers drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have ") + } + getReq.Id = args[0] + + response, err := w.ProviderProviders.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start list command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listOverrides []func( + *cobra.Command, + *marketplace.ListProvidersRequest, +) + +func newList() *cobra.Command { + cmd := &cobra.Command{} + + var listReq marketplace.ListProvidersRequest + + // TODO: short flags + + cmd.Flags().IntVar(&listReq.PageSize, "page-size", listReq.PageSize, ``) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, ``) + + cmd.Use = "list" + cmd.Short = `List providers.` + cmd.Long = `List providers. + + List provider profiles for account.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response := w.ProviderProviders.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listOverrides { + fn(cmd, &listReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *marketplace.UpdateProviderRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq marketplace.UpdateProviderRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update ID" + cmd.Short = `Update provider.` + cmd.Long = `Update provider. + + Update provider profile` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + updateReq.Id = args[0] + + response, err := w.ProviderProviders.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service ProviderProviders diff --git a/cmd/workspace/providers/providers.go b/cmd/workspace/providers/providers.go index 851c668a7..7305191c8 100755 --- a/cmd/workspace/providers/providers.go +++ b/cmd/workspace/providers/providers.go @@ -29,6 +29,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListShares()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -73,13 +81,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'authentication_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -123,12 +131,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -200,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -278,12 +274,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -314,7 +304,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -323,11 +313,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Providers.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Providers.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -342,12 +329,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-shares command // Slice with functions to override default command behavior. @@ -401,11 +382,8 @@ func newListShares() *cobra.Command { } listSharesReq.Name = args[0] - response, err := w.Providers.ListSharesAll(ctx, listSharesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Providers.ListShares(ctx, listSharesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -420,12 +398,6 @@ func newListShares() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListShares()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -512,10 +484,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Providers diff --git a/cmd/workspace/quality-monitors/quality-monitors.go b/cmd/workspace/quality-monitors/quality-monitors.go new file mode 100755 index 000000000..95d992164 --- /dev/null +++ b/cmd/workspace/quality-monitors/quality-monitors.go @@ -0,0 +1,674 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package quality_monitors + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/catalog" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "quality-monitors", + Short: `A monitor computes and monitors data or model quality metrics for a table over time.`, + Long: `A monitor computes and monitors data or model quality metrics for a table over + time. It generates metrics tables and a dashboard that you can use to monitor + table health and set alerts. + + Most write operations require the user to be the owner of the table (or its + parent schema or parent catalog). 
Viewing the dashboard, computed metrics, or + monitor configuration only requires the user to have **SELECT** privileges on + the table (along with **USE_SCHEMA** and **USE_CATALOG**).`, + GroupID: "catalog", + Annotations: map[string]string{ + "package": "catalog", + }, + } + + // Add methods + cmd.AddCommand(newCancelRefresh()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetRefresh()) + cmd.AddCommand(newListRefreshes()) + cmd.AddCommand(newRunRefresh()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start cancel-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cancelRefreshOverrides []func( + *cobra.Command, + *catalog.CancelRefreshRequest, +) + +func newCancelRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var cancelRefreshReq catalog.CancelRefreshRequest + + // TODO: short flags + + cmd.Use = "cancel-refresh TABLE_NAME REFRESH_ID" + cmd.Short = `Cancel refresh.` + cmd.Long = `Cancel refresh. + + Cancel an active monitor refresh for the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + TABLE_NAME: Full name of the table. + REFRESH_ID: ID of the refresh.` + + // This command is being previewed; hide from help output. + cmd.Hidden = true + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + cancelRefreshReq.TableName = args[0] + cancelRefreshReq.RefreshId = args[1] + + err = w.QualityMonitors.CancelRefresh(ctx, cancelRefreshReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range cancelRefreshOverrides { + fn(cmd, &cancelRefreshReq) + } + + return cmd +} + +// start create command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var createOverrides []func( + *cobra.Command, + *catalog.CreateMonitor, +) + +func newCreate() *cobra.Command { + cmd := &cobra.Command{} + + var createReq catalog.CreateMonitor + var createJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&createReq.BaselineTableName, "baseline-table-name", createReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + // TODO: array: custom_metrics + // TODO: complex arg: data_classification_config + // TODO: complex arg: inference_log + // TODO: complex arg: notifications + // TODO: complex arg: schedule + cmd.Flags().BoolVar(&createReq.SkipBuiltinDashboard, "skip-builtin-dashboard", createReq.SkipBuiltinDashboard, `Whether to skip creating a default dashboard summarizing data quality metrics.`) + // TODO: array: slicing_exprs + // TODO: complex arg: snapshot + // TODO: complex arg: time_series + cmd.Flags().StringVar(&createReq.WarehouseId, "warehouse-id", createReq.WarehouseId, `Optional argument to specify the warehouse for dashboard creation.`) + + cmd.Use = "create TABLE_NAME ASSETS_DIR OUTPUT_SCHEMA_NAME" + cmd.Short = `Create a table monitor.` + cmd.Long = `Create a table monitor. + + Creates a new monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog, have + **USE_SCHEMA** on the table's parent schema, and have **SELECT** access on the + table 2. have **USE_CATALOG** on the table's parent catalog, be an owner of + the table's parent schema, and have **SELECT** access on the table. 3. have + the following permissions: - **USE_CATALOG** on the table's parent catalog - + **USE_SCHEMA** on the table's parent schema - be an owner of the table. + + Workspace assets, such as the dashboard, will be created in the workspace + where this call was made. + + Arguments: + TABLE_NAME: Full name of the table. + ASSETS_DIR: The directory to store monitoring assets (e.g. dashboard, metric tables). + OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. Provide 'assets_dir', 'output_schema_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(3) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = createJson.Unmarshal(&createReq) + if err != nil { + return err + } + } + createReq.TableName = args[0] + if !cmd.Flags().Changed("json") { + createReq.AssetsDir = args[1] + } + if !cmd.Flags().Changed("json") { + createReq.OutputSchemaName = args[2] + } + + response, err := w.QualityMonitors.Create(ctx, createReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range createOverrides { + fn(cmd, &createReq) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *catalog.DeleteQualityMonitorRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq catalog.DeleteQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "delete TABLE_NAME" + cmd.Short = `Delete a table monitor.` + cmd.Long = `Delete a table monitor. + + Deletes a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Note that the metric tables and dashboard will not be deleted as part of this + call; those assets must be manually cleaned up (if desired). + + Arguments: + TABLE_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + deleteReq.TableName = args[0] + + err = w.QualityMonitors.Delete(ctx, deleteReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOverrides []func( + *cobra.Command, + *catalog.GetQualityMonitorRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq catalog.GetQualityMonitorRequest + + // TODO: short flags + + cmd.Use = "get TABLE_NAME" + cmd.Short = `Get a table monitor.` + cmd.Long = `Get a table monitor. + + Gets a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema. 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + The returned information includes configuration values, as well as information + on assets created by the monitor. Some information (e.g., dashboard) may be + filtered out if the caller is in a different workspace than where the monitor + was created. 
+ + Arguments: + TABLE_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getReq.TableName = args[0] + + response, err := w.QualityMonitors.Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start get-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getRefreshOverrides []func( + *cobra.Command, + *catalog.GetRefreshRequest, +) + +func newGetRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var getRefreshReq catalog.GetRefreshRequest + + // TODO: short flags + + cmd.Use = "get-refresh TABLE_NAME REFRESH_ID" + cmd.Short = `Get refresh.` + cmd.Long = `Get refresh. + + Gets info about a specific monitor refresh using the given refresh ID. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + TABLE_NAME: Full name of the table. + REFRESH_ID: ID of the refresh.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getRefreshReq.TableName = args[0] + getRefreshReq.RefreshId = args[1] + + response, err := w.QualityMonitors.GetRefresh(ctx, getRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getRefreshOverrides { + fn(cmd, &getRefreshReq) + } + + return cmd +} + +// start list-refreshes command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var listRefreshesOverrides []func( + *cobra.Command, + *catalog.ListRefreshesRequest, +) + +func newListRefreshes() *cobra.Command { + cmd := &cobra.Command{} + + var listRefreshesReq catalog.ListRefreshesRequest + + // TODO: short flags + + cmd.Use = "list-refreshes TABLE_NAME" + cmd.Short = `List refreshes.` + cmd.Long = `List refreshes. + + Gets an array containing the history of the most recent refreshes (up to 25) + for this table. + + The caller must either: 1. 
be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - + **SELECT** privilege on the table. + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + TABLE_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + listRefreshesReq.TableName = args[0] + + response, err := w.QualityMonitors.ListRefreshes(ctx, listRefreshesReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range listRefreshesOverrides { + fn(cmd, &listRefreshesReq) + } + + return cmd +} + +// start run-refresh command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var runRefreshOverrides []func( + *cobra.Command, + *catalog.RunRefreshRequest, +) + +func newRunRefresh() *cobra.Command { + cmd := &cobra.Command{} + + var runRefreshReq catalog.RunRefreshRequest + + // TODO: short flags + + cmd.Use = "run-refresh TABLE_NAME" + cmd.Short = `Queue a metric refresh for a monitor.` + cmd.Long = `Queue a metric refresh for a monitor. + + Queues a metric refresh on the monitor for the specified table. The refresh + will execute in the background. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table + + Additionally, the call must be made from the workspace where the monitor was + created. + + Arguments: + TABLE_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + runRefreshReq.TableName = args[0] + + response, err := w.QualityMonitors.RunRefresh(ctx, runRefreshReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range runRefreshOverrides { + fn(cmd, &runRefreshReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
+var updateOverrides []func( + *cobra.Command, + *catalog.UpdateMonitor, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq catalog.UpdateMonitor + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&updateReq.BaselineTableName, "baseline-table-name", updateReq.BaselineTableName, `Name of the baseline table from which drift metrics are computed from.`) + // TODO: array: custom_metrics + cmd.Flags().StringVar(&updateReq.DashboardId, "dashboard-id", updateReq.DashboardId, `Id of dashboard that visualizes the computed metrics.`) + // TODO: complex arg: data_classification_config + // TODO: complex arg: inference_log + // TODO: complex arg: notifications + // TODO: complex arg: schedule + // TODO: array: slicing_exprs + // TODO: complex arg: snapshot + // TODO: complex arg: time_series + + cmd.Use = "update TABLE_NAME OUTPUT_SCHEMA_NAME" + cmd.Short = `Update a table monitor.` + cmd.Long = `Update a table monitor. + + Updates a monitor for the specified table. + + The caller must either: 1. be an owner of the table's parent catalog 2. have + **USE_CATALOG** on the table's parent catalog and be an owner of the table's + parent schema 3. have the following permissions: - **USE_CATALOG** on the + table's parent catalog - **USE_SCHEMA** on the table's parent schema - be an + owner of the table. + + Additionally, the call must be made from the workspace where the monitor was + created, and the caller must be the original creator of the monitor. + + Certain configuration fields, such as output asset identifiers, cannot be + updated. + + Arguments: + TABLE_NAME: Full name of the table. + OUTPUT_SCHEMA_NAME: Schema where output metric tables are created.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + if cmd.Flags().Changed("json") { + err := root.ExactArgs(1)(cmd, args) + if err != nil { + return fmt.Errorf("when --json flag is specified, provide only TABLE_NAME as positional arguments. Provide 'output_schema_name' in your JSON input") + } + return nil + } + check := root.ExactArgs(2) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } + updateReq.TableName = args[0] + if !cmd.Flags().Changed("json") { + updateReq.OutputSchemaName = args[1] + } + + response, err := w.QualityMonitors.Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service QualityMonitors diff --git a/cmd/workspace/queries/overrides.go b/cmd/workspace/queries/overrides.go index a06dabdeb..d7edf93a0 100644 --- a/cmd/workspace/queries/overrides.go +++ b/cmd/workspace/queries/overrides.go @@ -8,8 +8,9 @@ import ( func listOverride(listCmd *cobra.Command, listReq *sql.ListQueriesRequest) { // TODO: figure out colored/non-colored headers and colspan shifts + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Author"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Author"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{.User.Email|cyan}} {{end}}`) } diff --git a/cmd/workspace/queries/queries.go b/cmd/workspace/queries/queries.go index 38fa9c0c5..650131974 100755 --- a/cmd/workspace/queries/queries.go +++ b/cmd/workspace/queries/queries.go @@ -23,13 +23,26 @@ func New() *cobra.Command { Long: `These endpoints are used for CRUD operations on query definitions. Query definitions include the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. Queries can be scheduled using the - sql_task type of the Jobs API, e.g. :method:jobs/create.`, + sql_task type of the Jobs API, e.g. :method:jobs/create. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources`, GroupID: "sql", Annotations: map[string]string{ "package": "sql", }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRestore()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -68,7 +81,12 @@ func newCreate() *cobra.Command { available SQL warehouses. Or you can copy the data_source_id from an existing query. - **Note**: You cannot add a visualization until you create the query.` + **Note**: You cannot add a visualization until you create the query. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -105,12 +123,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -133,7 +145,12 @@ func newDelete() *cobra.Command { Moves a query to the trash. Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. The trash is - deleted after 30 days.` + deleted after 30 days. + + **Note**: A new version of the Databricks SQL API will soon be available. 
+ [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -180,12 +197,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -207,7 +218,12 @@ func newGet() *cobra.Command { cmd.Long = `Get a query definition. Retrieve a query object definition along with contextual permissions - information about the currently authenticated user.` + information about the currently authenticated user. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -254,12 +270,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -286,12 +296,20 @@ func newList() *cobra.Command { cmd.Long = `Get a list of queries. Gets a list of queries. Optionally, this list can be filtered by a search - term.` + term. + + **Warning**: Calling this API concurrently 10 or more times could result in + throttling, service degradation, or a temporary ban. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -300,11 +318,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Queries.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Queries.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -319,12 +334,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start restore command // Slice with functions to override default command behavior. @@ -346,7 +355,12 @@ func newRestore() *cobra.Command { cmd.Long = `Restore a query. Restore a query that has been moved to the trash. A restored query appears in - list views and searches. You can use restored queries for alerts.` + list views and searches. You can use restored queries for alerts. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -393,12 +407,6 @@ func newRestore() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRestore()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -422,6 +430,8 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The title of this query that appears in list views, widget headings, and on the query page.`) // TODO: any: options cmd.Flags().StringVar(&updateReq.Query, "query", updateReq.Query, `The text of the query to be run.`) + cmd.Flags().Var(&updateReq.RunAsRole, "run-as-role", `Sets the **Run as** role for the object. Supported values: [owner, viewer]`) + // TODO: array: tags cmd.Use = "update QUERY_ID" cmd.Short = `Change a query definition.` @@ -429,7 +439,12 @@ func newUpdate() *cobra.Command { Modify this query definition. - **Note**: You cannot undo this operation.` + **Note**: You cannot undo this operation. + + **Note**: A new version of the Databricks SQL API will soon be available. + [Learn more] + + [Learn more]: https://docs.databricks.com/en/whats-coming.html#updates-to-the-databricks-sql-api-for-managing-queries-alerts-and-data-sources` cmd.Annotations = make(map[string]string) @@ -482,10 +497,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Queries diff --git a/cmd/workspace/query-history/query-history.go b/cmd/workspace/query-history/query-history.go index 337ab4033..60d6004d9 100755 --- a/cmd/workspace/query-history/query-history.go +++ b/cmd/workspace/query-history/query-history.go @@ -24,6 +24,9 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -64,7 +67,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -73,11 +76,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.QueryHistory.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.QueryHistory.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -92,10 +92,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service QueryHistory diff --git a/cmd/workspace/query-visualizations/query-visualizations.go b/cmd/workspace/query-visualizations/query-visualizations.go index 4f04c4261..c94d83a82 100755 --- a/cmd/workspace/query-visualizations/query-visualizations.go +++ b/cmd/workspace/query-visualizations/query-visualizations.go @@ -32,6 +32,11 @@ func New() *cobra.Command { Hidden: true, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -97,12 +102,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -129,7 +128,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -159,12 +158,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -193,7 +186,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -231,10 +224,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service QueryVisualizations diff --git a/cmd/workspace/recipient-activation/recipient-activation.go b/cmd/workspace/recipient-activation/recipient-activation.go index 5fb5c7b9e..457fa9042 100755 --- a/cmd/workspace/recipient-activation/recipient-activation.go +++ b/cmd/workspace/recipient-activation/recipient-activation.go @@ -33,6 +33,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetActivationUrlInfo()) + cmd.AddCommand(newRetrieveToken()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -69,7 +73,7 @@ func newGetActivationUrlInfo() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -99,12 +103,6 @@ func newGetActivationUrlInfo() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetActivationUrlInfo()) - }) -} - // start retrieve-token command // Slice with functions to override default command behavior. @@ -134,7 +132,7 @@ func newRetrieveToken() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -164,10 +162,4 @@ func newRetrieveToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRetrieveToken()) - }) -} - // end service RecipientActivation diff --git a/cmd/workspace/recipients/recipients.go b/cmd/workspace/recipients/recipients.go index 463d7985c..c21d8a8c0 100755 --- a/cmd/workspace/recipients/recipients.go +++ b/cmd/workspace/recipients/recipients.go @@ -43,6 +43,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRotateToken()) + cmd.AddCommand(newSharePermissions()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -92,13 +101,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'name', 'authentication_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -142,12 +151,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -219,12 +222,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -297,12 +294,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -333,7 +324,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -342,11 +333,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Recipients.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Recipients.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -361,12 +349,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start rotate-token command // Slice with functions to override default command behavior. @@ -403,13 +385,13 @@ func newRotateToken() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'existing_token_expire_in_seconds' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -451,12 +433,6 @@ func newRotateToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRotateToken()) - }) -} - // start share-permissions command // Slice with functions to override default command behavior. @@ -528,12 +504,6 @@ func newSharePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSharePermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. 
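Commands that accept a request body keep a two-mode argument check after the rename: with `--json`, the required fields travel in the body and no positional arguments are allowed; without it, they stay positional. Condensed from the recipients `create` hunk above (the same validator shape recurs elsewhere with different field names):

    cmd.Args = func(cmd *cobra.Command, args []string) error {
        if cmd.Flags().Changed("json") {
            // --json mode: 'name' and 'authentication_type' come from the body.
            if err := root.ExactArgs(0)(cmd, args); err != nil {
                return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'authentication_type' in your JSON input")
            }
            return nil
        }
        // Flag-less mode: both fields are positional.
        return root.ExactArgs(2)(cmd, args)
    }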
@@ -620,10 +590,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Recipients diff --git a/cmd/workspace/registered-models/registered-models.go b/cmd/workspace/registered-models/registered-models.go index 774859f17..08e11d686 100755 --- a/cmd/workspace/registered-models/registered-models.go +++ b/cmd/workspace/registered-models/registered-models.go @@ -55,6 +55,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newDeleteAlias()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetAlias()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -110,13 +119,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'catalog_name', 'schema_name', 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -160,12 +169,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -242,12 +245,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start delete-alias command // Slice with functions to override default command behavior. @@ -282,7 +279,7 @@ func newDeleteAlias() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -313,12 +310,6 @@ func newDeleteAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAlias()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -335,6 +326,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include registered models in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME" cmd.Short = `Get a Registered Model.` cmd.Long = `Get a Registered Model. @@ -394,12 +387,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
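`root.ExactArgs` itself is not defined anywhere in this diff. A plausible reading, sketched below strictly as an assumption, is a thin wrapper over cobra's validator that lets the CLI own the error message (for example, naming the command path); the real helper may look quite different.

    // Hypothetical shape of root.ExactArgs — an assumption, not taken from this diff.
    func ExactArgs(n int) cobra.PositionalArgs {
        return func(cmd *cobra.Command, args []string) error {
            if len(args) != n {
                return fmt.Errorf("%q expects %d positional argument(s), got %d", cmd.CommandPath(), n, len(args))
            }
            return nil
        }
    }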
@@ -417,6 +404,7 @@ func newList() *cobra.Command { // TODO: short flags cmd.Flags().StringVar(&listReq.CatalogName, "catalog-name", listReq.CatalogName, `The identifier of the catalog under which to list registered models.`) + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include registered models in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Max number of registered models to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token to send for the next page of results (pagination).`) cmd.Flags().StringVar(&listReq.SchemaName, "schema-name", listReq.SchemaName, `The identifier of the schema under which to list registered models.`) @@ -441,7 +429,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -450,11 +438,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.RegisteredModels.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.RegisteredModels.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -469,12 +454,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-alias command // Slice with functions to override default command behavior. @@ -513,13 +492,13 @@ func newSetAlias() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(2)(cmd, args) + err := root.ExactArgs(2)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, provide only FULL_NAME, ALIAS as positional arguments. Provide 'version_num' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -562,12 +541,6 @@ func newSetAlias() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetAlias()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -587,7 +560,6 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the registered model.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the registered model.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the registered model.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the registered model.`) @@ -659,10 +631,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service RegisteredModels diff --git a/cmd/workspace/repos/overrides.go b/cmd/workspace/repos/overrides.go index f6f26f81d..96d645efb 100644 --- a/cmd/workspace/repos/overrides.go +++ b/cmd/workspace/repos/overrides.go @@ -25,7 +25,7 @@ func createOverride(createCmd *cobra.Command, createReq *workspace.CreateRepo) { // If the provider argument is not specified, we try to detect it from the URL. check := cobra.RangeArgs(1, 2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + check = root.ExactArgs(0) } return check(cmd, args) } diff --git a/cmd/workspace/repos/repos.go b/cmd/workspace/repos/repos.go index 62f637502..fb3d51b06 100755 --- a/cmd/workspace/repos/repos.go +++ b/cmd/workspace/repos/repos.go @@ -36,6 +36,17 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -83,13 +94,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'url', 'provider' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -130,12 +141,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -209,12 +214,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -288,12 +287,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
@@ -364,12 +357,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -441,12 +428,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -476,7 +457,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -485,11 +466,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Repos.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Repos.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -504,12 +482,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -591,12 +563,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -683,12 +649,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -770,10 +730,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Repos diff --git a/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go new file mode 100755 index 000000000..5e9f59d2c --- /dev/null +++ b/cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go @@ -0,0 +1,227 @@ +// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +package restrict_workspace_admins + +import ( + "fmt" + + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/service/settings" + "github.com/spf13/cobra" +) + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var cmdOverrides []func(*cobra.Command) + +func New() *cobra.Command { + cmd := &cobra.Command{ + Use: "restrict-workspace-admins", + Short: `The Restrict Workspace Admins setting lets you control the capabilities of workspace admins.`, + Long: `The Restrict Workspace Admins setting lets you control the capabilities of + workspace admins. 
With the setting status set to ALLOW_ALL, workspace admins + can create service principal personal access tokens on behalf of any service + principal in their workspace. Workspace admins can also change a job owner to + any user in their workspace. And they can change the job run_as setting to any + user in their workspace or to a service principal on which they have the + Service Principal User role. With the setting status set to + RESTRICT_TOKENS_AND_JOB_RUN_AS, workspace admins can only create personal + access tokens on behalf of service principals they have the Service Principal + User role on. They can also only change a job owner to themselves. And they + can change the job run_as setting to themselves or to a service principal on + which they have the Service Principal User role.`, + } + + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newUpdate()) + + // Apply optional overrides to this command. + for _, fn := range cmdOverrides { + fn(cmd) + } + + return cmd +} + +// start delete command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var deleteOverrides []func( + *cobra.Command, + *settings.DeleteRestrictWorkspaceAdminsSettingRequest, +) + +func newDelete() *cobra.Command { + cmd := &cobra.Command{} + + var deleteReq settings.DeleteRestrictWorkspaceAdminsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&deleteReq.Etag, "etag", deleteReq.Etag, `etag used for versioning.`) + + cmd.Use = "delete" + cmd.Short = `Delete the restrict workspace admins setting.` + cmd.Long = `Delete the restrict workspace admins setting. + + Reverts the restrict workspace admins setting status for the workspace. A + fresh etag needs to be provided in DELETE requests (as a query parameter). + The etag can be retrieved by making a GET request before the DELETE request. + If the setting is updated/deleted concurrently, DELETE fails with 409 and + the request must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.RestrictWorkspaceAdmins().Delete(ctx, deleteReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range deleteOverrides { + fn(cmd, &deleteReq) + } + + return cmd +} + +// start get command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. 
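The delete and update help text above spells out an optimistic-concurrency contract: fetch the setting to obtain a fresh etag, send that etag with the mutating call, and on a 409 retry with the etag carried in the error response. A minimal sketch of the read-then-delete round trip on the SDK surface these commands use, assuming the Get response exposes an Etag field (the help text implies it, but the field is not shown in this diff):

    // Read first to obtain a fresh etag for versioning.
    current, err := w.Settings.RestrictWorkspaceAdmins().Get(ctx, settings.GetRestrictWorkspaceAdminsSettingRequest{})
    if err != nil {
        return err
    }

    // Revert the setting, passing the etag as the version guard. If the setting
    // was changed concurrently, the call fails with 409 and should be retried
    // with the fresh etag from that response.
    _, err = w.Settings.RestrictWorkspaceAdmins().Delete(ctx, settings.DeleteRestrictWorkspaceAdminsSettingRequest{
        Etag: current.Etag,
    })
    return err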
+var getOverrides []func( + *cobra.Command, + *settings.GetRestrictWorkspaceAdminsSettingRequest, +) + +func newGet() *cobra.Command { + cmd := &cobra.Command{} + + var getReq settings.GetRestrictWorkspaceAdminsSettingRequest + + // TODO: short flags + + cmd.Flags().StringVar(&getReq.Etag, "etag", getReq.Etag, `etag used for versioning.`) + + cmd.Use = "get" + cmd.Short = `Get the restrict workspace admins setting.` + cmd.Long = `Get the restrict workspace admins setting. + + Gets the restrict workspace admins setting.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(0) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + response, err := w.Settings.RestrictWorkspaceAdmins().Get(ctx, getReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOverrides { + fn(cmd, &getReq) + } + + return cmd +} + +// start update command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var updateOverrides []func( + *cobra.Command, + *settings.UpdateRestrictWorkspaceAdminsSettingRequest, +) + +func newUpdate() *cobra.Command { + cmd := &cobra.Command{} + + var updateReq settings.UpdateRestrictWorkspaceAdminsSettingRequest + var updateJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Use = "update" + cmd.Short = `Update the restrict workspace admins setting.` + cmd.Long = `Update the restrict workspace admins setting. + + Updates the restrict workspace admins setting for the workspace. A fresh etag + needs to be provided in PATCH requests (as part of the setting field). The + etag can be retrieved by making a GET request before the PATCH request. If + the setting is updated concurrently, PATCH fails with 409 and the request + must be retried by using the fresh etag in the 409 response.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = updateJson.Unmarshal(&updateReq) + if err != nil { + return err + } + } else { + return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") + } + + response, err := w.Settings.RestrictWorkspaceAdmins().Update(ctx, updateReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. 
+ for _, fn := range updateOverrides { + fn(cmd, &updateReq) + } + + return cmd +} + +// end service RestrictWorkspaceAdmins diff --git a/cmd/workspace/schemas/overrides.go b/cmd/workspace/schemas/overrides.go index 180690b6e..ba4c65ce7 100644 --- a/cmd/workspace/schemas/overrides.go +++ b/cmd/workspace/schemas/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListSchemasRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Full Name"}} {{header "Owner"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Full Name"}} {{header "Owner"}} {{header "Comment"}} {{range .}}{{.FullName|green}} {{.Owner|cyan}} {{.Comment}} {{end}}`) } diff --git a/cmd/workspace/schemas/schemas.go b/cmd/workspace/schemas/schemas.go index bad61a5f1..710141913 100755 --- a/cmd/workspace/schemas/schemas.go +++ b/cmd/workspace/schemas/schemas.go @@ -31,6 +31,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -77,13 +84,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'catalog_name' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -124,12 +131,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -201,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -223,6 +218,8 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include schemas in the response for which the principal can only access selective metadata for.`) + cmd.Use = "get FULL_NAME" cmd.Short = `Get a schema.` cmd.Long = `Get a schema. @@ -279,12 +276,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -301,6 +292,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include schemas in the response for which the principal can only access selective metadata for.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of schemas to return.`) cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque pagination token to go to next page based on previous query.`) @@ -311,10 +303,8 @@ func newList() *cobra.Command { Gets an array of schemas for a catalog in the metastore. 
If the caller is the metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved. Otherwise, only schemas owned by the caller (or for - which the caller has the **USE_SCHEMA** privilege) will be retrieved. For - unpaginated request, there is no guarantee of a specific ordering of the - elements in the array. For paginated request, elements are ordered by their - name. + which the caller has the **USE_SCHEMA** privilege) will be retrieved. There is + no guarantee of a specific ordering of the elements in the array. Arguments: CATALOG_NAME: Parent catalog for schemas of interest.` @@ -322,7 +312,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -333,11 +323,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] - response, err := w.Schemas.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Schemas.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -352,12 +339,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -378,7 +359,6 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().Var(&updateReq.EnablePredictiveOptimization, "enable-predictive-optimization", `Whether predictive optimization should be enabled for this object and objects under it. 
Supported values: [DISABLE, ENABLE, INHERIT]`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `Name of schema, relative to parent catalog.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the schema.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of schema.`) // TODO: map via StringToStringVar: properties @@ -447,10 +427,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Schemas diff --git a/cmd/workspace/secrets/overrides.go b/cmd/workspace/secrets/overrides.go index 6e765bf73..b215f17a7 100644 --- a/cmd/workspace/secrets/overrides.go +++ b/cmd/workspace/secrets/overrides.go @@ -11,15 +11,17 @@ func cmdOverride(cmd *cobra.Command) { } func listScopesOverride(listScopesCmd *cobra.Command) { + listScopesCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Scope"}} {{header "Backend Type"}}`) listScopesCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Scope"}} {{header "Backend Type"}} {{range .}}{{.Name|green}} {{.BackendType}} {{end}}`) } func listSecretsOverride(listSecretsCommand *cobra.Command, _ *workspace.ListSecretsRequest) { + listSecretsCommand.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Key"}} {{header "Last Updated Timestamp"}}`) listSecretsCommand.Annotations["template"] = cmdio.Heredoc(` - {{header "Key"}} {{header "Last Updated Timestamp"}} {{range .}}{{.Key|green}} {{.LastUpdatedTimestamp}} {{end}}`) } diff --git a/cmd/workspace/secrets/put_secret.go b/cmd/workspace/secrets/put_secret.go index 2fbf49c5c..e323c7a10 100644 --- a/cmd/workspace/secrets/put_secret.go +++ b/cmd/workspace/secrets/put_secret.go @@ -50,9 +50,9 @@ func newPutSecret() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) if cmd.Flags().Changed("json") { - check = cobra.ExactArgs(0) + check = root.ExactArgs(0) } return check(cmd, args) } diff --git a/cmd/workspace/secrets/secrets.go b/cmd/workspace/secrets/secrets.go index 270538b00..f836a2670 100755 --- a/cmd/workspace/secrets/secrets.go +++ b/cmd/workspace/secrets/secrets.go @@ -38,6 +38,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateScope()) + cmd.AddCommand(newDeleteAcl()) + cmd.AddCommand(newDeleteScope()) + cmd.AddCommand(newDeleteSecret()) + cmd.AddCommand(newGetAcl()) + cmd.AddCommand(newGetSecret()) + cmd.AddCommand(newListAcls()) + cmd.AddCommand(newListScopes()) + cmd.AddCommand(newListSecrets()) + cmd.AddCommand(newPutAcl()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -73,8 +85,7 @@ func newCreateScope() *cobra.Command { cmd.Long = `Create a new secret scope. The scope name must consist of alphanumeric characters, dashes, underscores, - and periods, and may not exceed 128 characters. The maximum number of scopes - in a workspace is 100. + and periods, and may not exceed 128 characters. Arguments: SCOPE: Scope name requested by the user. 
Scope names are unique.` @@ -83,13 +94,13 @@ func newCreateScope() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -127,12 +138,6 @@ func newCreateScope() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateScope()) - }) -} - // start delete-acl command // Slice with functions to override default command behavior. @@ -170,13 +175,13 @@ func newDeleteAcl() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'principal' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -217,12 +222,6 @@ func newDeleteAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteAcl()) - }) -} - // start delete-scope command // Slice with functions to override default command behavior. @@ -258,13 +257,13 @@ func newDeleteScope() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -302,12 +301,6 @@ func newDeleteScope() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteScope()) - }) -} - // start delete-secret command // Slice with functions to override default command behavior. @@ -345,13 +338,13 @@ func newDeleteSecret() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'key' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -392,12 +385,6 @@ func newDeleteSecret() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteSecret()) - }) -} - // start get-acl command // Slice with functions to override default command behavior. 
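Related to the iterator change, the overrides.go hunks for schemas and secrets above split the table header out of the row template into a separate headerTemplate annotation, presumably so the header can be printed once before rows start streaming out of RenderIterator. Condensed from the secrets list-scopes override (column separators elided):

    // Before: a single template carried both the header and the row range.
    listScopesCmd.Annotations["template"] = cmdio.Heredoc(`
        {{header "Scope"}}  {{header "Backend Type"}}
        {{range .}}{{.Name|green}}  {{.BackendType}}
        {{end}}`)

    // After: the header lives in its own annotation; the template only ranges.
    listScopesCmd.Annotations["headerTemplate"] = cmdio.Heredoc(`
        {{header "Scope"}}  {{header "Backend Type"}}`)
    listScopesCmd.Annotations["template"] = cmdio.Heredoc(`
        {{range .}}{{.Name|green}}  {{.BackendType}}
        {{end}}`)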
@@ -432,7 +419,7 @@ func newGetAcl() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -463,12 +450,6 @@ func newGetAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetAcl()) - }) -} - // start get-secret command // Slice with functions to override default command behavior. @@ -509,7 +490,7 @@ func newGetSecret() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -540,12 +521,6 @@ func newGetSecret() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetSecret()) - }) -} - // start list-acls command // Slice with functions to override default command behavior. @@ -579,7 +554,7 @@ func newListAcls() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -590,11 +565,8 @@ func newListAcls() *cobra.Command { listAclsReq.Scope = args[0] - response, err := w.Secrets.ListAclsAll(ctx, listAclsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListAcls(ctx, listAclsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -609,12 +581,6 @@ func newListAcls() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListAcls()) - }) -} - // start list-scopes command // Slice with functions to override default command behavior. @@ -641,11 +607,8 @@ func newListScopes() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Secrets.ListScopesAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListScopes(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -660,12 +623,6 @@ func newListScopes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListScopes()) - }) -} - // start list-secrets command // Slice with functions to override default command behavior. @@ -701,7 +658,7 @@ func newListSecrets() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -712,11 +669,8 @@ func newListSecrets() *cobra.Command { listSecretsReq.Scope = args[0] - response, err := w.Secrets.ListSecretsAll(ctx, listSecretsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Secrets.ListSecrets(ctx, listSecretsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -731,12 +685,6 @@ func newListSecrets() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListSecrets()) - }) -} - // start put-acl command // Slice with functions to override default command behavior. @@ -795,13 +743,13 @@ func newPutAcl() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'scope', 'principal', 'permission' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -848,10 +796,4 @@ func newPutAcl() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPutAcl()) - }) -} - // end service Secrets diff --git a/cmd/workspace/service-principals/service-principals.go b/cmd/workspace/service-principals/service-principals.go index 5e66804d1..957cb1265 100755 --- a/cmd/workspace/service-principals/service-principals.go +++ b/cmd/workspace/service-principals/service-principals.go @@ -32,6 +32,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -77,7 +85,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -112,12 +120,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -188,12 +190,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -265,12 +261,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -304,7 +294,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -313,11 +303,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ServicePrincipals.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ServicePrincipals.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -332,12 +319,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. 
@@ -420,12 +401,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -516,10 +491,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service ServicePrincipals diff --git a/cmd/workspace/serving-endpoints/serving-endpoints.go b/cmd/workspace/serving-endpoints/serving-endpoints.go index 8c488d093..b92f824d3 100755 --- a/cmd/workspace/serving-endpoints/serving-endpoints.go +++ b/cmd/workspace/serving-endpoints/serving-endpoints.go @@ -40,6 +40,24 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newBuildLogs()) + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExportMetrics()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetOpenApi()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newLogs()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newPut()) + cmd.AddCommand(newQuery()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdateConfig()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -65,9 +83,8 @@ func newBuildLogs() *cobra.Command { // TODO: short flags cmd.Use = "build-logs NAME SERVED_MODEL_NAME" - cmd.Short = `Retrieve the logs associated with building the model's environment for a given serving endpoint's served model.` - cmd.Long = `Retrieve the logs associated with building the model's environment for a given - serving endpoint's served model. + cmd.Short = `Get build logs for a served model.` + cmd.Long = `Get build logs for a served model. Retrieves the build logs associated with the provided served model. @@ -80,7 +97,7 @@ func newBuildLogs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -111,12 +128,6 @@ func newBuildLogs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newBuildLogs()) - }) -} - // start create command // Slice with functions to override default command behavior. @@ -141,6 +152,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) // TODO: array: rate_limits + cmd.Flags().BoolVar(&createReq.RouteOptimized, "route-optimized", createReq.RouteOptimized, `Enable route optimization for the serving endpoint.`) // TODO: array: tags cmd.Use = "create" @@ -195,12 +207,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -227,7 +233,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -257,12 +263,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start export-metrics command // Slice with functions to override default command behavior. @@ -280,8 +280,8 @@ func newExportMetrics() *cobra.Command { // TODO: short flags cmd.Use = "export-metrics NAME" - cmd.Short = `Retrieve the metrics associated with a serving endpoint.` - cmd.Long = `Retrieve the metrics associated with a serving endpoint. + cmd.Short = `Get metrics of a serving endpoint.` + cmd.Long = `Get metrics of a serving endpoint. Retrieves the metrics associated with the provided serving endpoint in either Prometheus or OpenMetrics exposition format. @@ -293,7 +293,7 @@ func newExportMetrics() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -304,11 +304,12 @@ func newExportMetrics() *cobra.Command { exportMetricsReq.Name = args[0] - err = w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq) + response, err := w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq) if err != nil { return err } - return nil + defer response.Contents.Close() + return cmdio.Render(ctx, response.Contents) } // Disable completions since they are not applicable. @@ -323,12 +324,6 @@ func newExportMetrics() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExportMetrics()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -357,7 +352,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -387,10 +382,65 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) +// start get-open-api command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var getOpenApiOverrides []func( + *cobra.Command, + *serving.GetOpenApiRequest, +) + +func newGetOpenApi() *cobra.Command { + cmd := &cobra.Command{} + + var getOpenApiReq serving.GetOpenApiRequest + + // TODO: short flags + + cmd.Use = "get-open-api NAME" + cmd.Short = `Get the schema for a serving endpoint.` + cmd.Long = `Get the schema for a serving endpoint. + + Get the query schema of the serving endpoint in OpenAPI format. The schema + contains information for the supported paths, input and output format and + datatypes. + + Arguments: + NAME: The name of the serving endpoint that the served model belongs to. 
This + field is required.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + getOpenApiReq.Name = args[0] + + err = w.ServingEndpoints.GetOpenApi(ctx, getOpenApiReq) + if err != nil { + return err + } + return nil + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range getOpenApiOverrides { + fn(cmd, &getOpenApiReq) + } + + return cmd } // start get-permission-levels command @@ -421,7 +471,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -451,12 +501,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -486,7 +530,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -516,12 +560,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -534,8 +572,8 @@ func newList() *cobra.Command { cmd := &cobra.Command{} cmd.Use = "list" - cmd.Short = `Retrieve all serving endpoints.` - cmd.Long = `Retrieve all serving endpoints.` + cmd.Short = `Get all serving endpoints.` + cmd.Long = `Get all serving endpoints.` cmd.Annotations = make(map[string]string) @@ -543,11 +581,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.ServingEndpoints.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.ServingEndpoints.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -562,12 +597,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start logs command // Slice with functions to override default command behavior. @@ -585,9 +614,8 @@ func newLogs() *cobra.Command { // TODO: short flags cmd.Use = "logs NAME SERVED_MODEL_NAME" - cmd.Short = `Retrieve the most recent log lines associated with a given serving endpoint's served model.` - cmd.Long = `Retrieve the most recent log lines associated with a given serving endpoint's - served model. + cmd.Short = `Get the latest logs for a served model.` + cmd.Long = `Get the latest logs for a served model. Retrieves the service logs associated with the provided served model. 
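One behavioral change in serving-endpoints is worth flagging: export-metrics used to call the SDK for its side effect only, whereas the new handler receives a response and streams it. Judging from the Close call in the hunk above, the response wraps the raw Prometheus/OpenMetrics payload in something like an io.ReadCloser (an inference; the response type itself is not part of this excerpt):

    response, err := w.ServingEndpoints.ExportMetrics(ctx, exportMetricsReq)
    if err != nil {
        return err
    }
    // Contents behaves like an io.ReadCloser: close it when done and hand the
    // raw metrics stream to the renderer rather than a decoded struct.
    defer response.Contents.Close()
    return cmdio.Render(ctx, response.Contents)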
@@ -600,7 +628,7 @@ func newLogs() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -631,12 +659,6 @@ func newLogs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newLogs()) - }) -} - // start patch command // Slice with functions to override default command behavior. @@ -659,8 +681,8 @@ func newPatch() *cobra.Command { // TODO: array: delete_tags cmd.Use = "patch NAME" - cmd.Short = `Patch the tags of a serving endpoint.` - cmd.Long = `Patch the tags of a serving endpoint. + cmd.Short = `Update tags of a serving endpoint.` + cmd.Long = `Update tags of a serving endpoint. Used to batch add and delete tags from a serving endpoint with a single API call. @@ -672,7 +694,7 @@ func newPatch() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -708,12 +730,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start put command // Slice with functions to override default command behavior. @@ -735,8 +751,8 @@ func newPut() *cobra.Command { // TODO: array: rate_limits cmd.Use = "put NAME" - cmd.Short = `Update the rate limits of a serving endpoint.` - cmd.Long = `Update the rate limits of a serving endpoint. + cmd.Short = `Update rate limits of a serving endpoint.` + cmd.Long = `Update rate limits of a serving endpoint. Used to update the rate limits of a serving endpoint. NOTE: only external and foundation model endpoints are supported as of now. @@ -748,7 +764,7 @@ func newPut() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -784,12 +800,6 @@ func newPut() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPut()) - }) -} - // start query command // Slice with functions to override default command behavior. @@ -823,8 +833,8 @@ func newQuery() *cobra.Command { cmd.Flags().Float64Var(&queryReq.Temperature, "temperature", queryReq.Temperature, `The temperature field used ONLY for __completions__ and __chat external & foundation model__ serving endpoints.`) cmd.Use = "query NAME" - cmd.Short = `Query a serving endpoint with provided model input.` - cmd.Long = `Query a serving endpoint with provided model input. + cmd.Short = `Query a serving endpoint.` + cmd.Long = `Query a serving endpoint. Arguments: NAME: The name of the serving endpoint. This field is required.` @@ -832,7 +842,7 @@ func newQuery() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -868,12 +878,6 @@ func newQuery() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newQuery()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. 
@@ -907,7 +911,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -943,12 +947,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-config command // Slice with functions to override default command behavior. @@ -978,8 +976,8 @@ func newUpdateConfig() *cobra.Command { // TODO: complex arg: traffic_config cmd.Use = "update-config NAME" - cmd.Short = `Update a serving endpoint with a new config.` - cmd.Long = `Update a serving endpoint with a new config. + cmd.Short = `Update config of a serving endpoint.` + cmd.Long = `Update config of a serving endpoint. Updates any combination of the serving endpoint's served entities, the compute configuration of those served entities, and the endpoint's traffic config. An @@ -992,7 +990,7 @@ func newUpdateConfig() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1041,12 +1039,6 @@ func newUpdateConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateConfig()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1080,7 +1072,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -1116,10 +1108,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service ServingEndpoints diff --git a/cmd/workspace/settings/settings.go b/cmd/workspace/settings/settings.go index 193434d4e..214986c76 100755 --- a/cmd/workspace/settings/settings.go +++ b/cmd/workspace/settings/settings.go @@ -3,11 +3,13 @@ package settings import ( - "github.com/databricks/cli/cmd/root" - "github.com/databricks/cli/libs/cmdio" - "github.com/databricks/cli/libs/flags" - "github.com/databricks/databricks-sdk-go/service/settings" "github.com/spf13/cobra" + + automatic_cluster_update "github.com/databricks/cli/cmd/workspace/automatic-cluster-update" + compliance_security_profile "github.com/databricks/cli/cmd/workspace/compliance-security-profile" + default_namespace "github.com/databricks/cli/cmd/workspace/default-namespace" + enhanced_security_monitoring "github.com/databricks/cli/cmd/workspace/enhanced-security-monitoring" + restrict_workspace_admins "github.com/databricks/cli/cmd/workspace/restrict-workspace-admins" ) // Slice with functions to override default command behavior. @@ -16,26 +18,22 @@ var cmdOverrides []func(*cobra.Command) func New() *cobra.Command { cmd := &cobra.Command{ - Use: "settings", - Short: `The default namespace setting API allows users to configure the default namespace for a Databricks workspace.`, - Long: `The default namespace setting API allows users to configure the default - namespace for a Databricks workspace. 
- - Through this API, users can retrieve, set, or modify the default namespace - used when queries do not reference a fully qualified three-level name. For - example, if you use the API to set 'retail_prod' as the default catalog, then - a query 'SELECT * FROM myTable' would reference the object - 'retail_prod.default.myTable' (the schema 'default' is always assumed). - - This setting requires a restart of clusters and SQL warehouses to take effect. - Additionally, the default namespace only applies when using Unity - Catalog-enabled compute.`, + Use: "settings", + Short: `Workspace Settings API allows users to manage settings at the workspace level.`, + Long: `Workspace Settings API allows users to manage settings at the workspace level.`, GroupID: "settings", Annotations: map[string]string{ "package": "settings", }, } + // Add subservices + cmd.AddCommand(automatic_cluster_update.New()) + cmd.AddCommand(compliance_security_profile.New()) + cmd.AddCommand(default_namespace.New()) + cmd.AddCommand(enhanced_security_monitoring.New()) + cmd.AddCommand(restrict_workspace_admins.New()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -44,226 +42,4 @@ func New() *cobra.Command { return cmd } -// start delete-default-workspace-namespace command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var deleteDefaultWorkspaceNamespaceOverrides []func( - *cobra.Command, - *settings.DeleteDefaultWorkspaceNamespaceRequest, -) - -func newDeleteDefaultWorkspaceNamespace() *cobra.Command { - cmd := &cobra.Command{} - - var deleteDefaultWorkspaceNamespaceReq settings.DeleteDefaultWorkspaceNamespaceRequest - - // TODO: short flags - - cmd.Use = "delete-default-workspace-namespace ETAG" - cmd.Short = `Delete the default namespace setting.` - cmd.Long = `Delete the default namespace setting. - - Deletes the default namespace setting for the workspace. A fresh etag needs to - be provided in DELETE requests (as a query parameter). The etag can be - retrieved by making a GET request before the DELETE request. If the setting is - updated/deleted concurrently, DELETE will fail with 409 and the request will - need to be retried by using the fresh etag in the 409 response. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - deleteDefaultWorkspaceNamespaceReq.Etag = args[0] - - response, err := w.Settings.DeleteDefaultWorkspaceNamespace(ctx, deleteDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. 
- // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range deleteDefaultWorkspaceNamespaceOverrides { - fn(cmd, &deleteDefaultWorkspaceNamespaceReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteDefaultWorkspaceNamespace()) - }) -} - -// start read-default-workspace-namespace command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. -var readDefaultWorkspaceNamespaceOverrides []func( - *cobra.Command, - *settings.ReadDefaultWorkspaceNamespaceRequest, -) - -func newReadDefaultWorkspaceNamespace() *cobra.Command { - cmd := &cobra.Command{} - - var readDefaultWorkspaceNamespaceReq settings.ReadDefaultWorkspaceNamespaceRequest - - // TODO: short flags - - cmd.Use = "read-default-workspace-namespace ETAG" - cmd.Short = `Get the default namespace setting.` - cmd.Long = `Get the default namespace setting. - - Gets the default namespace setting. - - Arguments: - ETAG: etag used for versioning. The response is at least as fresh as the eTag - provided. This is used for optimistic concurrency control as a way to help - prevent simultaneous writes of a setting overwriting each other. It is - strongly suggested that systems make use of the etag in the read -> delete - pattern to perform setting deletions in order to avoid race conditions. - That is, get an etag from a GET request, and pass it with the DELETE - request to identify the rule set version you are deleting.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - readDefaultWorkspaceNamespaceReq.Etag = args[0] - - response, err := w.Settings.ReadDefaultWorkspaceNamespace(ctx, readDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range readDefaultWorkspaceNamespaceOverrides { - fn(cmd, &readDefaultWorkspaceNamespaceReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newReadDefaultWorkspaceNamespace()) - }) -} - -// start update-default-workspace-namespace command - -// Slice with functions to override default command behavior. -// Functions can be added from the `init()` function in manually curated files in this directory. 
-var updateDefaultWorkspaceNamespaceOverrides []func( - *cobra.Command, - *settings.UpdateDefaultWorkspaceNamespaceRequest, -) - -func newUpdateDefaultWorkspaceNamespace() *cobra.Command { - cmd := &cobra.Command{} - - var updateDefaultWorkspaceNamespaceReq settings.UpdateDefaultWorkspaceNamespaceRequest - var updateDefaultWorkspaceNamespaceJson flags.JsonFlag - - // TODO: short flags - cmd.Flags().Var(&updateDefaultWorkspaceNamespaceJson, "json", `either inline JSON string or @path/to/file.json with request body`) - - cmd.Flags().BoolVar(&updateDefaultWorkspaceNamespaceReq.AllowMissing, "allow-missing", updateDefaultWorkspaceNamespaceReq.AllowMissing, `This should always be set to true for Settings API.`) - cmd.Flags().StringVar(&updateDefaultWorkspaceNamespaceReq.FieldMask, "field-mask", updateDefaultWorkspaceNamespaceReq.FieldMask, `Field mask is required to be passed into the PATCH request.`) - // TODO: complex arg: setting - - cmd.Use = "update-default-workspace-namespace" - cmd.Short = `Update the default namespace setting.` - cmd.Long = `Update the default namespace setting. - - Updates the default namespace setting for the workspace. A fresh etag needs to - be provided in PATCH requests (as part of the setting field). The etag can be - retrieved by making a GET request before the PATCH request. Note that if the - setting does not exist, GET will return a NOT_FOUND error and the etag will be - present in the error response, which should be set in the PATCH request. If - the setting is updated concurrently, PATCH will fail with 409 and the request - will need to be retried by using the fresh etag in the 409 response.` - - cmd.Annotations = make(map[string]string) - - cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) - return check(cmd, args) - } - - cmd.PreRunE = root.MustWorkspaceClient - cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { - ctx := cmd.Context() - w := root.WorkspaceClient(ctx) - - if cmd.Flags().Changed("json") { - err = updateDefaultWorkspaceNamespaceJson.Unmarshal(&updateDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - } - - response, err := w.Settings.UpdateDefaultWorkspaceNamespace(ctx, updateDefaultWorkspaceNamespaceReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) - } - - // Disable completions since they are not applicable. - // Can be overridden by manual implementation in `override.go`. - cmd.ValidArgsFunction = cobra.NoFileCompletions - - // Apply optional overrides to this command. - for _, fn := range updateDefaultWorkspaceNamespaceOverrides { - fn(cmd, &updateDefaultWorkspaceNamespaceReq) - } - - return cmd -} - -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateDefaultWorkspaceNamespace()) - }) -} - // end service Settings diff --git a/cmd/workspace/shares/shares.go b/cmd/workspace/shares/shares.go index 7cb85abfb..c2fd779a7 100755 --- a/cmd/workspace/shares/shares.go +++ b/cmd/workspace/shares/shares.go @@ -31,6 +31,15 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSharePermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -58,6 +67,7 @@ func newCreate() *cobra.Command { cmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`) + cmd.Flags().StringVar(&createReq.StorageRoot, "storage-root", createReq.StorageRoot, `Storage root URL for the share.`) cmd.Use = "create NAME" cmd.Short = `Create a share.` @@ -74,13 +84,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -118,12 +128,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -153,7 +157,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -183,12 +187,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -220,7 +218,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -250,12 +248,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -281,11 +273,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Shares.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Shares.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -300,12 +289,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start share-permissions command // Slice with functions to override default command behavior. @@ -335,7 +318,7 @@ func newSharePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -365,12 +348,6 @@ func newSharePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSharePermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -392,6 +369,7 @@ func newUpdate() *cobra.Command { cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the share.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of share.`) + cmd.Flags().StringVar(&updateReq.StorageRoot, "storage-root", updateReq.StorageRoot, `Storage root URL for the share.`) // TODO: array: updates cmd.Use = "update NAME" @@ -406,6 +384,9 @@ func newUpdate() *cobra.Command { In the case that the share name is changed, **updateShare** requires that the caller is both the share owner and a metastore admin. + If there are notebook files in the share, the __storage_root__ field cannot be + updated. + For each table that is added through this method, the share owner must also have **SELECT** privilege on the table. This privilege must be maintained indefinitely for recipients to be able to access the table. Typically, you @@ -419,7 +400,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -455,12 +436,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -497,7 +472,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -533,10 +508,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Shares diff --git a/cmd/workspace/storage-credentials/overrides.go b/cmd/workspace/storage-credentials/overrides.go index 534e045dd..92dec91eb 100644 --- a/cmd/workspace/storage-credentials/overrides.go +++ b/cmd/workspace/storage-credentials/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListStorageCredentialsRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Credentials"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Credentials"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{if .AwsIamRole}}{{.AwsIamRole.RoleArn}}{{end}}{{if .AzureServicePrincipal}}{{.AzureServicePrincipal.ApplicationId}}{{end}}{{if .DatabricksGcpServiceAccount}}{{.DatabricksGcpServiceAccount.Email}}{{end}} {{end}}`) } diff --git a/cmd/workspace/storage-credentials/storage-credentials.go b/cmd/workspace/storage-credentials/storage-credentials.go index 910d2b5df..18656a61c 100755 --- a/cmd/workspace/storage-credentials/storage-credentials.go +++ b/cmd/workspace/storage-credentials/storage-credentials.go @@ -39,6 +39,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newValidate()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -70,7 +78,7 @@ func newCreate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Supplying true to this argument skips validation of the created credential.`) @@ -87,13 +95,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -131,12 +139,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -210,12 +212,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -288,12 +284,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -320,14 +310,13 @@ func newList() *cobra.Command { Gets an array of storage credentials (as __StorageCredentialInfo__ objects). The array is limited to only those storage credentials the caller has permission to access. If the caller is a metastore admin, retrieval of - credentials is unrestricted. For unpaginated request, there is no guarantee of - a specific ordering of the elements in the array. For paginated request, - elements are ordered by their name.` + credentials is unrestricted. There is no guarantee of a specific ordering of + the elements in the array.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -336,11 +325,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.StorageCredentials.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.StorageCredentials.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -355,12 +341,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -384,8 +364,9 @@ func newUpdate() *cobra.Command { // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `Comment associated with the credential.`) - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if there are dependent external locations or external tables.`) + cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the storage credential.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `Username of current owner of credential.`) cmd.Flags().BoolVar(&updateReq.ReadOnly, "read-only", updateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) @@ -451,12 +432,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start validate command // Slice with functions to override default command behavior. @@ -479,10 +454,10 @@ func newValidate() *cobra.Command { // TODO: complex arg: azure_managed_identity // TODO: complex arg: azure_service_principal // TODO: complex arg: cloudflare_api_token - // TODO: output-only field + // TODO: complex arg: databricks_gcp_service_account cmd.Flags().StringVar(&validateReq.ExternalLocationName, "external-location-name", validateReq.ExternalLocationName, `The name of an existing external location to validate.`) cmd.Flags().BoolVar(&validateReq.ReadOnly, "read-only", validateReq.ReadOnly, `Whether the storage credential is only usable for read operations.`) - // TODO: any: storage_credential_name + cmd.Flags().StringVar(&validateReq.StorageCredentialName, "storage-credential-name", validateReq.StorageCredentialName, `The name of the storage credential to validate.`) cmd.Flags().StringVar(&validateReq.Url, "url", validateReq.Url, `The external location url to validate.`) cmd.Use = "validate" @@ -505,7 +480,7 @@ func newValidate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -540,10 +515,4 @@ func newValidate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newValidate()) - }) -} - // end service StorageCredentials diff --git a/cmd/workspace/system-schemas/system-schemas.go b/cmd/workspace/system-schemas/system-schemas.go index 6dbad5a3f..3fe0580d7 100755 --- a/cmd/workspace/system-schemas/system-schemas.go +++ b/cmd/workspace/system-schemas/system-schemas.go @@ -3,8 +3,6 @@ package system_schemas import ( - "fmt" - "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/cmdio" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -28,6 +26,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDisable()) + cmd.AddCommand(newEnable()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -66,7 +69,7 @@ func newDisable() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -76,10 +79,7 @@ func newDisable() *cobra.Command { w := root.WorkspaceClient(ctx) disableReq.MetastoreId = args[0] - _, err = fmt.Sscan(args[1], &disableReq.SchemaName) - if err != nil { - return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1]) - } + disableReq.SchemaName = args[1] err = w.SystemSchemas.Disable(ctx, disableReq) if err != nil { @@ -100,12 +100,6 @@ func newDisable() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDisable()) - }) -} - // start enable command // Slice with functions to override default command behavior. @@ -136,7 +130,7 @@ func newEnable() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -146,10 +140,7 @@ func newEnable() *cobra.Command { w := root.WorkspaceClient(ctx) enableReq.MetastoreId = args[0] - _, err = fmt.Sscan(args[1], &enableReq.SchemaName) - if err != nil { - return fmt.Errorf("invalid SCHEMA_NAME: %s", args[1]) - } + enableReq.SchemaName = args[1] err = w.SystemSchemas.Enable(ctx, enableReq) if err != nil { @@ -170,12 +161,6 @@ func newEnable() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEnable()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -205,7 +190,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -216,11 +201,8 @@ func newList() *cobra.Command { listReq.MetastoreId = args[0] - response, err := w.SystemSchemas.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.SystemSchemas.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -235,10 +217,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service SystemSchemas diff --git a/cmd/workspace/table-constraints/table-constraints.go b/cmd/workspace/table-constraints/table-constraints.go index e17b95404..166da146c 100755 --- a/cmd/workspace/table-constraints/table-constraints.go +++ b/cmd/workspace/table-constraints/table-constraints.go @@ -39,6 +39,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -115,12 +119,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. 
@@ -161,7 +159,7 @@ func newDelete() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(3) + check := root.ExactArgs(3) return check(cmd, args) } @@ -196,10 +194,4 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // end service TableConstraints diff --git a/cmd/workspace/tables/overrides.go b/cmd/workspace/tables/overrides.go index 35fc351a4..a0849ada7 100644 --- a/cmd/workspace/tables/overrides.go +++ b/cmd/workspace/tables/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *catalog.ListTablesRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "Full Name"}} {{header "Table Type"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "Full Name"}} {{header "Table Type"}} {{range .}}{{.FullName|green}} {{blue "%s" .TableType}} {{end}}`) } diff --git a/cmd/workspace/tables/tables.go b/cmd/workspace/tables/tables.go index e655dfd7c..4564b4fe6 100755 --- a/cmd/workspace/tables/tables.go +++ b/cmd/workspace/tables/tables.go @@ -35,6 +35,14 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExists()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newList()) + cmd.AddCommand(newListSummaries()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -117,10 +125,81 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) +// start exists command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var existsOverrides []func( + *cobra.Command, + *catalog.ExistsRequest, +) + +func newExists() *cobra.Command { + cmd := &cobra.Command{} + + var existsReq catalog.ExistsRequest + + // TODO: short flags + + cmd.Use = "exists FULL_NAME" + cmd.Short = `Get boolean reflecting if table exists.` + cmd.Long = `Get boolean reflecting if table exists. + + Gets if a table exists in the metastore for a specific catalog and schema. The + caller must satisfy one of the following requirements: * Be a metastore admin + * Be the owner of the parent catalog * Be the owner of the parent schema and + have the USE_CATALOG privilege on the parent catalog * Have the + **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** + privilege on the parent schema, and either be the table owner or have the + SELECT privilege on the table. * Have BROWSE privilege on the parent catalog * + Have BROWSE privilege on the parent schema. + + Arguments: + FULL_NAME: Full name of the table.` + + cmd.Annotations = make(map[string]string) + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No FULL_NAME argument specified. Loading names for Tables drop-down." + names, err := w.Tables.TableInfoNameToTableIdMap(ctx, catalog.ListTablesRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Tables drop-down. Please manually specify required arguments. 
Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Full name of the table") + if err != nil { + return err + } + args = append(args, id) + } + if len(args) != 1 { + return fmt.Errorf("expected to have full name of the table") + } + existsReq.FullName = args[0] + + response, err := w.Tables.Exists(ctx, existsReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range existsOverrides { + fn(cmd, &existsReq) + } + + return cmd } // start get command @@ -139,6 +218,7 @@ func newGet() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&getReq.IncludeBrowse, "include-browse", getReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`) cmd.Flags().BoolVar(&getReq.IncludeDeltaMetadata, "include-delta-metadata", getReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`) cmd.Use = "get FULL_NAME" @@ -146,10 +226,12 @@ func newGet() *cobra.Command { cmd.Long = `Get a table. Gets a table from the metastore for a specific catalog and schema. The caller - must be a metastore admin, be the owner of the table and have the - **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** - privilege on the parent schema, or be the owner of the table and have the - **SELECT** privilege on it as well. + must satisfy one of the following requirements: * Be a metastore admin * Be + the owner of the parent catalog * Be the owner of the parent schema and have + the USE_CATALOG privilege on the parent catalog * Have the **USE_CATALOG** + privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent + schema, and either be the table owner or have the SELECT privilege on the + table. Arguments: FULL_NAME: Full name of the table.` @@ -199,12 +281,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start list command // Slice with functions to override default command behavior. 
@@ -221,6 +297,7 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include tables in the response for which the principal can only access selective metadata for.`) cmd.Flags().BoolVar(&listReq.IncludeDeltaMetadata, "include-delta-metadata", listReq.IncludeDeltaMetadata, `Whether delta metadata should be included in the response.`) cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of tables to return.`) cmd.Flags().BoolVar(&listReq.OmitColumns, "omit-columns", listReq.OmitColumns, `Whether to omit the columns of the table from the response or not.`) @@ -245,7 +322,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -257,11 +334,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Tables.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tables.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -276,12 +350,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start list-summaries command // Slice with functions to override default command behavior. @@ -348,11 +416,8 @@ func newListSummaries() *cobra.Command { } listSummariesReq.CatalogName = args[0] - response, err := w.Tables.ListSummariesAll(ctx, listSummariesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tables.ListSummaries(ctx, listSummariesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -367,12 +432,6 @@ func newListSummaries() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListSummaries()) - }) -} - // start update command // Slice with functions to override default command behavior. 
@@ -460,10 +519,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Tables diff --git a/cmd/workspace/token-management/overrides.go b/cmd/workspace/token-management/overrides.go index 46967d37a..8122c1a1b 100644 --- a/cmd/workspace/token-management/overrides.go +++ b/cmd/workspace/token-management/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *settings.ListTokenManagementRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Created By"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Created By"}} {{header "Comment"}} {{range .}}{{.TokenId|green}} {{.CreatedByUsername|cyan}} {{.Comment|cyan}} {{end}}`) } diff --git a/cmd/workspace/token-management/token-management.go b/cmd/workspace/token-management/token-management.go index 956555b6d..dea94edb0 100755 --- a/cmd/workspace/token-management/token-management.go +++ b/cmd/workspace/token-management/token-management.go @@ -29,6 +29,16 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateOboToken()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -56,29 +66,28 @@ func newCreateOboToken() *cobra.Command { cmd.Flags().Var(&createOboTokenJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&createOboTokenReq.Comment, "comment", createOboTokenReq.Comment, `Comment that describes the purpose of the token.`) + cmd.Flags().Int64Var(&createOboTokenReq.LifetimeSeconds, "lifetime-seconds", createOboTokenReq.LifetimeSeconds, `The number of seconds before the token expires.`) - cmd.Use = "create-obo-token APPLICATION_ID LIFETIME_SECONDS" + cmd.Use = "create-obo-token APPLICATION_ID" cmd.Short = `Create on-behalf token.` cmd.Long = `Create on-behalf token. - + Creates a token on behalf of a service principal. Arguments: - APPLICATION_ID: Application ID of the service principal. - LIFETIME_SECONDS: The number of seconds before the token expires.` + APPLICATION_ID: Application ID of the service principal.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'application_id', 'lifetime_seconds' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'application_id' in your JSON input") } return nil } - check := cobra.ExactArgs(2) - return check(cmd, args) + return nil } cmd.PreRunE = root.MustWorkspaceClient @@ -91,15 +100,25 @@ func newCreateOboToken() *cobra.Command { if err != nil { return err } - } - if !cmd.Flags().Changed("json") { - createOboTokenReq.ApplicationId = args[0] - } - if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[1], &createOboTokenReq.LifetimeSeconds) - if err != nil { - return fmt.Errorf("invalid LIFETIME_SECONDS: %s", args[1]) + } else { + if len(args) == 0 { + promptSpinner := cmdio.Spinner(ctx) + promptSpinner <- "No APPLICATION_ID argument specified. Loading names for Token Management drop-down." + names, err := w.TokenManagement.TokenInfoCommentToTokenIdMap(ctx, settings.ListTokenManagementRequest{}) + close(promptSpinner) + if err != nil { + return fmt.Errorf("failed to load names for Token Management drop-down. Please manually specify required arguments. Original error: %w", err) + } + id, err := cmdio.Select(ctx, names, "Application ID of the service principal") + if err != nil { + return err + } + args = append(args, id) } + if len(args) != 1 { + return fmt.Errorf("expected to have application id of the service principal") + } + createOboTokenReq.ApplicationId = args[0] } response, err := w.TokenManagement.CreateOboToken(ctx, createOboTokenReq) @@ -121,12 +140,6 @@ func newCreateOboToken() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateOboToken()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -146,7 +159,7 @@ func newDelete() *cobra.Command { cmd.Use = "delete TOKEN_ID" cmd.Short = `Delete a token.` cmd.Long = `Delete a token. - + Deletes a token, specified by its ID. Arguments: @@ -197,12 +210,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -222,7 +229,7 @@ func newGet() *cobra.Command { cmd.Use = "get TOKEN_ID" cmd.Short = `Get token info.` cmd.Long = `Get token info. - + Gets information about a token, specified by its ID. Arguments: @@ -273,12 +280,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -293,7 +294,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Use = "get-permission-levels" cmd.Short = `Get token permission levels.` cmd.Long = `Get token permission levels. - + Gets the permission levels that a user can have on an object.` cmd.Annotations = make(map[string]string) @@ -321,12 +322,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -341,7 +336,7 @@ func newGetPermissions() *cobra.Command { cmd.Use = "get-permissions" cmd.Short = `Get token permissions.` cmd.Long = `Get token permissions. - + Gets the permissions of all tokens. 
Tokens can inherit permissions from their root object.` @@ -370,12 +365,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -398,13 +387,13 @@ func newList() *cobra.Command { cmd.Use = "list" cmd.Short = `List all tokens.` cmd.Long = `List all tokens. - + Lists all tokens associated with the specified workspace or user.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -413,11 +402,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.TokenManagement.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.TokenManagement.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -432,12 +418,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -461,14 +441,14 @@ func newSetPermissions() *cobra.Command { cmd.Use = "set-permissions" cmd.Short = `Set token permissions.` cmd.Long = `Set token permissions. - + Sets permissions on all tokens. Tokens can inherit permissions from their root object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -503,12 +483,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -532,14 +506,14 @@ func newUpdatePermissions() *cobra.Command { cmd.Use = "update-permissions" cmd.Short = `Update token permissions.` cmd.Long = `Update token permissions. - + Updates the permissions on all tokens. 
Tokens can inherit permissions from their root object.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -574,10 +548,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service TokenManagement diff --git a/cmd/workspace/tokens/overrides.go b/cmd/workspace/tokens/overrides.go index 09c51758e..142902da4 100644 --- a/cmd/workspace/tokens/overrides.go +++ b/cmd/workspace/tokens/overrides.go @@ -6,8 +6,9 @@ import ( ) func listOverride(listCmd *cobra.Command) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Expiry time"}} {{header "Comment"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Expiry time"}} {{header "Comment"}} {{range .}}{{.TokenId|green}} {{cyan "%d" .ExpiryTime}} {{.Comment|cyan}} {{end}}`) } diff --git a/cmd/workspace/tokens/tokens.go b/cmd/workspace/tokens/tokens.go index cd82ef63f..afe4b9a03 100755 --- a/cmd/workspace/tokens/tokens.go +++ b/cmd/workspace/tokens/tokens.go @@ -28,6 +28,11 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -69,7 +74,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -104,12 +109,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -144,7 +143,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'token_id' in your JSON input") } @@ -203,12 +202,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -232,11 +225,8 @@ func newList() *cobra.Command { cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Tokens.ListAll(ctx) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Tokens.List(ctx) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. 
@@ -251,10 +241,4 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // end service Tokens diff --git a/cmd/workspace/users/users.go b/cmd/workspace/users/users.go index 4cc485e96..53ba2e85c 100755 --- a/cmd/workspace/users/users.go +++ b/cmd/workspace/users/users.go @@ -37,6 +37,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newList()) + cmd.AddCommand(newPatch()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -85,7 +97,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -120,12 +132,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -197,12 +203,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start get command // Slice with functions to override default command behavior. @@ -281,12 +281,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -329,12 +323,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -378,12 +366,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -417,7 +399,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -426,11 +408,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Users.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Users.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -445,12 +424,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start patch command // Slice with functions to override default command behavior. 
@@ -533,12 +506,6 @@ func newPatch() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newPatch()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -569,7 +536,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -604,12 +571,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -701,12 +662,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -737,7 +692,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -772,10 +727,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Users diff --git a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go index d429267ad..dd9d57835 100755 --- a/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go +++ b/cmd/workspace/vector-search-endpoints/vector-search-endpoints.go @@ -28,6 +28,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateEndpoint()) + cmd.AddCommand(newDeleteEndpoint()) + cmd.AddCommand(newGetEndpoint()) + cmd.AddCommand(newListEndpoints()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -73,13 +79,13 @@ func newCreateEndpoint() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'endpoint_type' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -142,12 +148,6 @@ func newCreateEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateEndpoint()) - }) -} - // start delete-endpoint command // Slice with functions to override default command behavior. @@ -164,18 +164,17 @@ func newDeleteEndpoint() *cobra.Command { // TODO: short flags - cmd.Use = "delete-endpoint ENDPOINT_NAME NAME" + cmd.Use = "delete-endpoint ENDPOINT_NAME" cmd.Short = `Delete an endpoint.` cmd.Long = `Delete an endpoint. 
Arguments: - ENDPOINT_NAME: Name of the endpoint - NAME: Name of the endpoint to delete` + ENDPOINT_NAME: Name of the endpoint` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(1) return check(cmd, args) } @@ -185,7 +184,6 @@ func newDeleteEndpoint() *cobra.Command { w := root.WorkspaceClient(ctx) deleteEndpointReq.EndpointName = args[0] - deleteEndpointReq.Name = args[1] err = w.VectorSearchEndpoints.DeleteEndpoint(ctx, deleteEndpointReq) if err != nil { @@ -206,12 +204,6 @@ func newDeleteEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteEndpoint()) - }) -} - // start get-endpoint command // Slice with functions to override default command behavior. @@ -238,7 +230,7 @@ func newGetEndpoint() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -268,12 +260,6 @@ func newGetEndpoint() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetEndpoint()) - }) -} - // start list-endpoints command // Slice with functions to override default command behavior. @@ -299,7 +285,7 @@ func newListEndpoints() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -308,11 +294,8 @@ func newListEndpoints() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.VectorSearchEndpoints.ListEndpointsAll(ctx, listEndpointsReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.VectorSearchEndpoints.ListEndpoints(ctx, listEndpointsReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -327,10 +310,4 @@ func newListEndpoints() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListEndpoints()) - }) -} - // end service VectorSearchEndpoints diff --git a/cmd/workspace/vector-search-indexes/vector-search-indexes.go b/cmd/workspace/vector-search-indexes/vector-search-indexes.go index 8999967f1..158474770 100755 --- a/cmd/workspace/vector-search-indexes/vector-search-indexes.go +++ b/cmd/workspace/vector-search-indexes/vector-search-indexes.go @@ -35,6 +35,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreateIndex()) + cmd.AddCommand(newDeleteDataVectorIndex()) + cmd.AddCommand(newDeleteIndex()) + cmd.AddCommand(newGetIndex()) + cmd.AddCommand(newListIndexes()) + cmd.AddCommand(newQueryIndex()) + cmd.AddCommand(newQueryNextPage()) + cmd.AddCommand(newScanIndex()) + cmd.AddCommand(newSyncIndex()) + cmd.AddCommand(newUpsertDataVectorIndex()) + // Apply optional overrides to this command. 
for _, fn := range cmdOverrides { fn(cmd) @@ -61,11 +73,10 @@ func newCreateIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&createIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - // TODO: complex arg: delta_sync_vector_index_spec + // TODO: complex arg: delta_sync_index_spec // TODO: complex arg: direct_access_index_spec - cmd.Flags().StringVar(&createIndexReq.EndpointName, "endpoint-name", createIndexReq.EndpointName, `Name of the endpoint to be used for serving the index.`) - cmd.Use = "create-index NAME PRIMARY_KEY INDEX_TYPE" + cmd.Use = "create-index NAME ENDPOINT_NAME PRIMARY_KEY INDEX_TYPE" cmd.Short = `Create an index.` cmd.Long = `Create an index. @@ -73,6 +84,7 @@ func newCreateIndex() *cobra.Command { Arguments: NAME: Name of the index + ENDPOINT_NAME: Name of the endpoint to be used for serving the index PRIMARY_KEY: Primary key of the index INDEX_TYPE: There are 2 types of Vector Search indexes: @@ -86,13 +98,13 @@ func newCreateIndex() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'primary_key', 'index_type' in your JSON input") + return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'name', 'endpoint_name', 'primary_key', 'index_type' in your JSON input") } return nil } - check := cobra.ExactArgs(3) + check := root.ExactArgs(4) return check(cmd, args) } @@ -111,12 +123,15 @@ func newCreateIndex() *cobra.Command { createIndexReq.Name = args[0] } if !cmd.Flags().Changed("json") { - createIndexReq.PrimaryKey = args[1] + createIndexReq.EndpointName = args[1] } if !cmd.Flags().Changed("json") { - _, err = fmt.Sscan(args[2], &createIndexReq.IndexType) + createIndexReq.PrimaryKey = args[2] + } + if !cmd.Flags().Changed("json") { + _, err = fmt.Sscan(args[3], &createIndexReq.IndexType) if err != nil { - return fmt.Errorf("invalid INDEX_TYPE: %s", args[2]) + return fmt.Errorf("invalid INDEX_TYPE: %s", args[3]) } } @@ -139,12 +154,6 @@ func newCreateIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreateIndex()) - }) -} - // start delete-data-vector-index command // Slice with functions to override default command behavior. @@ -163,20 +172,20 @@ func newDeleteDataVectorIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&deleteDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "delete-data-vector-index NAME" + cmd.Use = "delete-data-vector-index INDEX_NAME" cmd.Short = `Delete data from index.` cmd.Long = `Delete data from index. Handles the deletion of data from a specified vector index. Arguments: - NAME: Name of the vector index where data is to be deleted. Must be a Direct + INDEX_NAME: Name of the vector index where data is to be deleted. 
Must be a Direct Vector Access Index.` cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -193,7 +202,7 @@ func newDeleteDataVectorIndex() *cobra.Command { } else { return fmt.Errorf("please provide command input in JSON format by specifying the --json flag") } - deleteDataVectorIndexReq.Name = args[0] + deleteDataVectorIndexReq.IndexName = args[0] response, err := w.VectorSearchIndexes.DeleteDataVectorIndex(ctx, deleteDataVectorIndexReq) if err != nil { @@ -214,12 +223,6 @@ func newDeleteDataVectorIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteDataVectorIndex()) - }) -} - // start delete-index command // Slice with functions to override default command behavior. @@ -248,7 +251,7 @@ func newDeleteIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -278,12 +281,6 @@ func newDeleteIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDeleteIndex()) - }) -} - // start get-index command // Slice with functions to override default command behavior. @@ -312,7 +309,7 @@ func newGetIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -342,12 +339,6 @@ func newGetIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetIndex()) - }) -} - // start list-indexes command // Slice with functions to override default command behavior. @@ -378,7 +369,7 @@ func newListIndexes() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -389,11 +380,8 @@ func newListIndexes() *cobra.Command { listIndexesReq.EndpointName = args[0] - response, err := w.VectorSearchIndexes.ListIndexesAll(ctx, listIndexesReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.VectorSearchIndexes.ListIndexes(ctx, listIndexesReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -408,12 +396,6 @@ func newListIndexes() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newListIndexes()) - }) -} - // start query-index command // Slice with functions to override default command behavior. 
@@ -435,7 +417,9 @@ func newQueryIndex() *cobra.Command { cmd.Flags().StringVar(&queryIndexReq.FiltersJson, "filters-json", queryIndexReq.FiltersJson, `JSON string representing query filters.`) cmd.Flags().IntVar(&queryIndexReq.NumResults, "num-results", queryIndexReq.NumResults, `Number of results to return.`) cmd.Flags().StringVar(&queryIndexReq.QueryText, "query-text", queryIndexReq.QueryText, `Query text.`) + cmd.Flags().StringVar(&queryIndexReq.QueryType, "query-type", queryIndexReq.QueryType, `The query type to use.`) // TODO: array: query_vector + cmd.Flags().Float64Var(&queryIndexReq.ScoreThreshold, "score-threshold", queryIndexReq.ScoreThreshold, `Threshold for the approximate nearest neighbor search.`) cmd.Use = "query-index INDEX_NAME" cmd.Short = `Query an index.` @@ -449,7 +433,7 @@ func newQueryIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -487,10 +471,144 @@ func newQueryIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newQueryIndex()) - }) +// start query-next-page command + +// Slice with functions to override default command behavior. +// Functions can be added from the `init()` function in manually curated files in this directory. +var queryNextPageOverrides []func( + *cobra.Command, + *vectorsearch.QueryVectorIndexNextPageRequest, +) + +func newQueryNextPage() *cobra.Command { + cmd := &cobra.Command{} + + var queryNextPageReq vectorsearch.QueryVectorIndexNextPageRequest + var queryNextPageJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&queryNextPageJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&queryNextPageReq.EndpointName, "endpoint-name", queryNextPageReq.EndpointName, `Name of the endpoint.`) + cmd.Flags().StringVar(&queryNextPageReq.PageToken, "page-token", queryNextPageReq.PageToken, `Page token returned from previous QueryVectorIndex or QueryVectorIndexNextPage API.`) + + cmd.Use = "query-next-page INDEX_NAME" + cmd.Short = `Query next page.` + cmd.Long = `Query next page. + + Use next_page_token returned from previous QueryVectorIndex or + QueryVectorIndexNextPage request to fetch next page of results. + + Arguments: + INDEX_NAME: Name of the vector index to query.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = queryNextPageJson.Unmarshal(&queryNextPageReq) + if err != nil { + return err + } + } + queryNextPageReq.IndexName = args[0] + + response, err := w.VectorSearchIndexes.QueryNextPage(ctx, queryNextPageReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range queryNextPageOverrides { + fn(cmd, &queryNextPageReq) + } + + return cmd +} + +// start scan-index command + +// Slice with functions to override default command behavior. 
+// Functions can be added from the `init()` function in manually curated files in this directory. +var scanIndexOverrides []func( + *cobra.Command, + *vectorsearch.ScanVectorIndexRequest, +) + +func newScanIndex() *cobra.Command { + cmd := &cobra.Command{} + + var scanIndexReq vectorsearch.ScanVectorIndexRequest + var scanIndexJson flags.JsonFlag + + // TODO: short flags + cmd.Flags().Var(&scanIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) + + cmd.Flags().StringVar(&scanIndexReq.LastPrimaryKey, "last-primary-key", scanIndexReq.LastPrimaryKey, `Primary key of the last entry returned in the previous scan.`) + cmd.Flags().IntVar(&scanIndexReq.NumResults, "num-results", scanIndexReq.NumResults, `Number of results to return.`) + + cmd.Use = "scan-index INDEX_NAME" + cmd.Short = `Scan an index.` + cmd.Long = `Scan an index. + + Scan the specified vector index and return the first num_results entries + after the exclusive primary_key. + + Arguments: + INDEX_NAME: Name of the vector index to scan.` + + cmd.Annotations = make(map[string]string) + + cmd.Args = func(cmd *cobra.Command, args []string) error { + check := root.ExactArgs(1) + return check(cmd, args) + } + + cmd.PreRunE = root.MustWorkspaceClient + cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { + ctx := cmd.Context() + w := root.WorkspaceClient(ctx) + + if cmd.Flags().Changed("json") { + err = scanIndexJson.Unmarshal(&scanIndexReq) + if err != nil { + return err + } + } + scanIndexReq.IndexName = args[0] + + response, err := w.VectorSearchIndexes.ScanIndex(ctx, scanIndexReq) + if err != nil { + return err + } + return cmdio.Render(ctx, response) + } + + // Disable completions since they are not applicable. + // Can be overridden by manual implementation in `override.go`. + cmd.ValidArgsFunction = cobra.NoFileCompletions + + // Apply optional overrides to this command. + for _, fn := range scanIndexOverrides { + fn(cmd, &scanIndexReq) + } + + return cmd } // start sync-index command @@ -521,7 +639,7 @@ func newSyncIndex() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -551,12 +669,6 @@ func newSyncIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSyncIndex()) - }) -} - // start upsert-data-vector-index command // Slice with functions to override default command behavior. @@ -575,14 +687,14 @@ func newUpsertDataVectorIndex() *cobra.Command { // TODO: short flags cmd.Flags().Var(&upsertDataVectorIndexJson, "json", `either inline JSON string or @path/to/file.json with request body`) - cmd.Use = "upsert-data-vector-index NAME INPUTS_JSON" + cmd.Use = "upsert-data-vector-index INDEX_NAME INPUTS_JSON" cmd.Short = `Upsert data into an index.` cmd.Long = `Upsert data into an index. Handles the upserting of data into a specified vector index. Arguments: - NAME: Name of the vector index where data is to be upserted. Must be a Direct + INDEX_NAME: Name of the vector index where data is to be upserted. Must be a Direct Vector Access Index. 
INPUTS_JSON: JSON string representing the data to be upserted.` @@ -590,13 +702,13 @@ func newUpsertDataVectorIndex() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(1)(cmd, args) + err := root.ExactArgs(1)(cmd, args) if err != nil { - return fmt.Errorf("when --json flag is specified, provide only NAME as positional arguments. Provide 'inputs_json' in your JSON input") + return fmt.Errorf("when --json flag is specified, provide only INDEX_NAME as positional arguments. Provide 'inputs_json' in your JSON input") } return nil } - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -611,7 +723,7 @@ func newUpsertDataVectorIndex() *cobra.Command { return err } } - upsertDataVectorIndexReq.Name = args[0] + upsertDataVectorIndexReq.IndexName = args[0] if !cmd.Flags().Changed("json") { upsertDataVectorIndexReq.InputsJson = args[1] } @@ -635,10 +747,4 @@ func newUpsertDataVectorIndex() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpsertDataVectorIndex()) - }) -} - // end service VectorSearchIndexes diff --git a/cmd/workspace/volumes/volumes.go b/cmd/workspace/volumes/volumes.go index 77b601819..3fc1f447b 100755 --- a/cmd/workspace/volumes/volumes.go +++ b/cmd/workspace/volumes/volumes.go @@ -34,6 +34,13 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newList()) + cmd.AddCommand(newRead()) + cmd.AddCommand(newUpdate()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -96,13 +103,13 @@ func newCreate() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'catalog_name', 'schema_name', 'name', 'volume_type' in your JSON input") } return nil } - check := cobra.ExactArgs(4) + check := root.ExactArgs(4) return check(cmd, args) } @@ -152,12 +159,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -174,7 +175,7 @@ func newDelete() *cobra.Command { // TODO: short flags - cmd.Use = "delete FULL_NAME_ARG" + cmd.Use = "delete NAME" cmd.Short = `Delete a Volume.` cmd.Long = `Delete a Volume. @@ -185,7 +186,7 @@ func newDelete() *cobra.Command { on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -196,7 +197,7 @@ func newDelete() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." 
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -211,7 +212,7 @@ func newDelete() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - deleteReq.FullNameArg = args[0] + deleteReq.Name = args[0] err = w.Volumes.Delete(ctx, deleteReq) if err != nil { @@ -232,12 +233,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -254,12 +249,16 @@ func newList() *cobra.Command { // TODO: short flags + cmd.Flags().BoolVar(&listReq.IncludeBrowse, "include-browse", listReq.IncludeBrowse, `Whether to include volumes in the response for which the principal can only access selective metadata for.`) + cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Maximum number of volumes to return (page length).`) + cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `Opaque token returned by a previous request.`) + cmd.Use = "list CATALOG_NAME SCHEMA_NAME" cmd.Short = `List Volumes.` cmd.Long = `List Volumes. - Gets an array of all volumes for the current metastore under the parent - catalog and schema. + Gets an array of volumes for the current metastore under the parent catalog + and schema. The returned volumes are filtered based on the privileges of the calling user. For example, the metastore admin is able to list all the volumes. A regular @@ -274,13 +273,10 @@ func newList() *cobra.Command { CATALOG_NAME: The identifier of the catalog SCHEMA_NAME: The identifier of the schema` - // This command is being previewed; hide from help output. - cmd.Hidden = true - cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -292,11 +288,8 @@ func newList() *cobra.Command { listReq.CatalogName = args[0] listReq.SchemaName = args[1] - response, err := w.Volumes.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Volumes.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -311,12 +304,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start read command // Slice with functions to override default command behavior. @@ -333,7 +320,9 @@ func newRead() *cobra.Command { // TODO: short flags - cmd.Use = "read FULL_NAME_ARG" + cmd.Flags().BoolVar(&readReq.IncludeBrowse, "include-browse", readReq.IncludeBrowse, `Whether to include volumes in the response for which the principal can only access selective metadata for.`) + + cmd.Use = "read NAME" cmd.Short = `Get a Volume.` cmd.Long = `Get a Volume. @@ -345,7 +334,7 @@ func newRead() *cobra.Command { the **USE_SCHEMA** privilege on the parent schema. Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -356,7 +345,7 @@ func newRead() *cobra.Command { if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. 
Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -371,7 +360,7 @@ func newRead() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - readReq.FullNameArg = args[0] + readReq.Name = args[0] response, err := w.Volumes.Read(ctx, readReq) if err != nil { @@ -392,12 +381,6 @@ func newRead() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newRead()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -417,11 +400,10 @@ func newUpdate() *cobra.Command { cmd.Flags().Var(&updateJson, "json", `either inline JSON string or @path/to/file.json with request body`) cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `The comment attached to the volume.`) - cmd.Flags().StringVar(&updateReq.Name, "name", updateReq.Name, `The name of the volume.`) cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the volume.`) cmd.Flags().StringVar(&updateReq.Owner, "owner", updateReq.Owner, `The identifier of the user who owns the volume.`) - cmd.Use = "update FULL_NAME_ARG" + cmd.Use = "update NAME" cmd.Short = `Update a Volume.` cmd.Long = `Update a Volume. @@ -435,7 +417,7 @@ func newUpdate() *cobra.Command { updated. Arguments: - FULL_NAME_ARG: The three-level (fully qualified) name of the volume` + NAME: The three-level (fully qualified) name of the volume` cmd.Annotations = make(map[string]string) @@ -452,7 +434,7 @@ func newUpdate() *cobra.Command { } if len(args) == 0 { promptSpinner := cmdio.Spinner(ctx) - promptSpinner <- "No FULL_NAME_ARG argument specified. Loading names for Volumes drop-down." + promptSpinner <- "No NAME argument specified. Loading names for Volumes drop-down." 
names, err := w.Volumes.VolumeInfoNameToVolumeIdMap(ctx, catalog.ListVolumesRequest{}) close(promptSpinner) if err != nil { @@ -467,7 +449,7 @@ func newUpdate() *cobra.Command { if len(args) != 1 { return fmt.Errorf("expected to have the three-level (fully qualified) name of the volume") } - updateReq.FullNameArg = args[0] + updateReq.Name = args[0] response, err := w.Volumes.Update(ctx, updateReq) if err != nil { @@ -488,10 +470,4 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // end service Volumes diff --git a/cmd/workspace/warehouses/overrides.go b/cmd/workspace/warehouses/overrides.go index 0714937c2..9457557d0 100644 --- a/cmd/workspace/warehouses/overrides.go +++ b/cmd/workspace/warehouses/overrides.go @@ -7,8 +7,9 @@ import ( ) func listOverride(listCmd *cobra.Command, listReq *sql.ListWarehousesRequest) { + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Name"}} {{header "Size"}} {{header "State"}} {{range .}}{{.Id|green}} {{.Name|cyan}} {{.ClusterSize|cyan}} {{if eq .State "RUNNING"}}{{"RUNNING"|green}}{{else if eq .State "STOPPED"}}{{"STOPPED"|red}}{{else}}{{blue "%s" .State}}{{end}} {{end}}`) } diff --git a/cmd/workspace/warehouses/warehouses.go b/cmd/workspace/warehouses/warehouses.go index c64788b89..cdf106365 100755 --- a/cmd/workspace/warehouses/warehouses.go +++ b/cmd/workspace/warehouses/warehouses.go @@ -30,6 +30,21 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newCreate()) + cmd.AddCommand(newDelete()) + cmd.AddCommand(newEdit()) + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetWorkspaceWarehouseConfig()) + cmd.AddCommand(newList()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newSetWorkspaceWarehouseConfig()) + cmd.AddCommand(newStart()) + cmd.AddCommand(newStop()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -84,7 +99,7 @@ func newCreate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -138,12 +153,6 @@ func newCreate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newCreate()) - }) -} - // start delete command // Slice with functions to override default command behavior. @@ -214,12 +223,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start edit command // Slice with functions to override default command behavior. @@ -336,12 +339,6 @@ func newEdit() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newEdit()) - }) -} - // start get command // Slice with functions to override default command behavior. 
@@ -417,12 +414,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. @@ -493,12 +484,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -570,12 +555,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -619,12 +598,6 @@ func newGetWorkspaceWarehouseConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetWorkspaceWarehouseConfig()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -652,7 +625,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -661,11 +634,8 @@ func newList() *cobra.Command { ctx := cmd.Context() w := root.WorkspaceClient(ctx) - response, err := w.Warehouses.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Warehouses.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -680,12 +650,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -767,12 +731,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start set-workspace-warehouse-config command // Slice with functions to override default command behavior. @@ -811,7 +769,7 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(0) + check := root.ExactArgs(0) return check(cmd, args) } @@ -846,12 +804,6 @@ func newSetWorkspaceWarehouseConfig() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetWorkspaceWarehouseConfig()) - }) -} - // start start command // Slice with functions to override default command behavior. @@ -946,12 +898,6 @@ func newStart() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStart()) - }) -} - // start stop command // Slice with functions to override default command behavior. 
@@ -1046,12 +992,6 @@ func newStop() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newStop()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -1133,10 +1073,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Warehouses diff --git a/cmd/workspace/workspace-bindings/workspace-bindings.go b/cmd/workspace/workspace-bindings/workspace-bindings.go index f8d31fa45..b7e0614ea 100755 --- a/cmd/workspace/workspace-bindings/workspace-bindings.go +++ b/cmd/workspace/workspace-bindings/workspace-bindings.go @@ -42,6 +42,12 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGet()) + cmd.AddCommand(newGetBindings()) + cmd.AddCommand(newUpdate()) + cmd.AddCommand(newUpdateBindings()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -79,7 +85,7 @@ func newGet() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -109,12 +115,6 @@ func newGet() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGet()) - }) -} - // start get-bindings command // Slice with functions to override default command behavior. @@ -145,7 +145,7 @@ func newGetBindings() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -176,12 +176,6 @@ func newGetBindings() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetBindings()) - }) -} - // start update command // Slice with functions to override default command behavior. @@ -216,7 +210,7 @@ func newUpdate() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -252,12 +246,6 @@ func newUpdate() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdate()) - }) -} - // start update-bindings command // Slice with functions to override default command behavior. 
@@ -293,7 +281,7 @@ func newUpdateBindings() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -330,10 +318,4 @@ func newUpdateBindings() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdateBindings()) - }) -} - // end service WorkspaceBindings diff --git a/cmd/workspace/workspace-conf/workspace-conf.go b/cmd/workspace/workspace-conf/workspace-conf.go index 99207ffad..92b2f0f3a 100755 --- a/cmd/workspace/workspace-conf/workspace-conf.go +++ b/cmd/workspace/workspace-conf/workspace-conf.go @@ -27,6 +27,10 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newGetStatus()) + cmd.AddCommand(newSetStatus()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -60,7 +64,7 @@ func newGetStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -90,12 +94,6 @@ func newGetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetStatus()) - }) -} - // start set-status command // Slice with functions to override default command behavior. @@ -156,10 +154,4 @@ func newSetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetStatus()) - }) -} - // end service WorkspaceConf diff --git a/cmd/workspace/workspace/export_dir.go b/cmd/workspace/workspace/export_dir.go index d2a86d009..0b53666f9 100644 --- a/cmd/workspace/workspace/export_dir.go +++ b/cmd/workspace/workspace/export_dir.go @@ -55,7 +55,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. // If a file exists, and overwrite is not set, we skip exporting the file if _, err := os.Stat(targetPath); err == nil && !overwrite { // Log event that this file/directory has been skipped - return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n") + return cmdio.RenderWithTemplate(ctx, newFileSkippedEvent(relPath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n") } // create the file @@ -74,7 +74,7 @@ func (opts exportDirOptions) callback(ctx context.Context, workspaceFiler filer. if err != nil { return err } - return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "{{.SourcePath}} -> {{.TargetPath}}\n") + return cmdio.RenderWithTemplate(ctx, newFileExportedEvent(sourcePath, targetPath), "", "{{.SourcePath}} -> {{.TargetPath}}\n") } } @@ -94,7 +94,7 @@ func newExportDir() *cobra.Command { ` cmd.Annotations = make(map[string]string) - cmd.Args = cobra.ExactArgs(2) + cmd.Args = root.ExactArgs(2) cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { diff --git a/cmd/workspace/workspace/import_dir.go b/cmd/workspace/workspace/import_dir.go index bc0b80667..19d9a0a17 100644 --- a/cmd/workspace/workspace/import_dir.go +++ b/cmd/workspace/workspace/import_dir.go @@ -93,14 +93,14 @@ func (opts importDirOptions) callback(ctx context.Context, workspaceFiler filer. 
// Emit file skipped event with the appropriate template fileSkippedEvent := newFileSkippedEvent(localName, path.Join(targetDir, remoteName)) template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n" - return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, template) + return cmdio.RenderWithTemplate(ctx, fileSkippedEvent, "", template) } if err != nil { return err } } fileImportedEvent := newFileImportedEvent(localName, path.Join(targetDir, remoteName)) - return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "{{.SourcePath}} -> {{.TargetPath}}\n") + return cmdio.RenderWithTemplate(ctx, fileImportedEvent, "", "{{.SourcePath}} -> {{.TargetPath}}\n") } } @@ -119,7 +119,7 @@ Notebooks will have their extensions (one of .scala, .py, .sql, .ipynb, .r) stri ` cmd.Annotations = make(map[string]string) - cmd.Args = cobra.ExactArgs(2) + cmd.Args = root.ExactArgs(2) cmd.PreRunE = root.MustWorkspaceClient cmd.RunE = func(cmd *cobra.Command, args []string) (err error) { diff --git a/cmd/workspace/workspace/overrides.go b/cmd/workspace/workspace/overrides.go index 1cac67419..cfed0a6ee 100644 --- a/cmd/workspace/workspace/overrides.go +++ b/cmd/workspace/workspace/overrides.go @@ -17,8 +17,9 @@ import ( func listOverride(listCmd *cobra.Command, listReq *workspace.ListWorkspaceRequest) { listReq.Path = "/" + listCmd.Annotations["headerTemplate"] = cmdio.Heredoc(` + {{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}}`) listCmd.Annotations["template"] = cmdio.Heredoc(` - {{header "ID"}} {{header "Type"}} {{header "Language"}} {{header "Path"}} {{range .}}{{green "%d" .ObjectId}} {{blue "%s" .ObjectType}} {{cyan "%s" .Language}} {{.Path|cyan}} {{end}}`) } diff --git a/cmd/workspace/workspace/workspace.go b/cmd/workspace/workspace/workspace.go index 5777f22fe..183cac898 100755 --- a/cmd/workspace/workspace/workspace.go +++ b/cmd/workspace/workspace/workspace.go @@ -31,6 +31,18 @@ func New() *cobra.Command { }, } + // Add methods + cmd.AddCommand(newDelete()) + cmd.AddCommand(newExport()) + cmd.AddCommand(newGetPermissionLevels()) + cmd.AddCommand(newGetPermissions()) + cmd.AddCommand(newGetStatus()) + cmd.AddCommand(newImport()) + cmd.AddCommand(newList()) + cmd.AddCommand(newMkdirs()) + cmd.AddCommand(newSetPermissions()) + cmd.AddCommand(newUpdatePermissions()) + // Apply optional overrides to this command. for _, fn := range cmdOverrides { fn(cmd) @@ -79,7 +91,7 @@ func newDelete() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'path' in your JSON input") } @@ -138,12 +150,6 @@ func newDelete() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newDelete()) - }) -} - // start export command // Slice with functions to override default command behavior. @@ -231,12 +237,6 @@ func newExport() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newExport()) - }) -} - // start get-permission-levels command // Slice with functions to override default command behavior. 
@@ -266,7 +266,7 @@ func newGetPermissionLevels() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -297,12 +297,6 @@ func newGetPermissionLevels() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissionLevels()) - }) -} - // start get-permissions command // Slice with functions to override default command behavior. @@ -333,7 +327,7 @@ func newGetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -364,12 +358,6 @@ func newGetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetPermissions()) - }) -} - // start get-status command // Slice with functions to override default command behavior. @@ -399,7 +387,7 @@ func newGetStatus() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -429,12 +417,6 @@ func newGetStatus() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newGetStatus()) - }) -} - // start import command // Slice with functions to override default command behavior. @@ -484,13 +466,13 @@ func newImport() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'path' in your JSON input") } return nil } - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -528,12 +510,6 @@ func newImport() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newImport()) - }) -} - // start list command // Slice with functions to override default command behavior. @@ -566,7 +542,7 @@ func newList() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(1) + check := root.ExactArgs(1) return check(cmd, args) } @@ -577,11 +553,8 @@ func newList() *cobra.Command { listReq.Path = args[0] - response, err := w.Workspace.ListAll(ctx, listReq) - if err != nil { - return err - } - return cmdio.Render(ctx, response) + response := w.Workspace.List(ctx, listReq) + return cmdio.RenderIterator(ctx, response) } // Disable completions since they are not applicable. @@ -596,12 +569,6 @@ func newList() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newList()) - }) -} - // start mkdirs command // Slice with functions to override default command behavior. @@ -640,7 +607,7 @@ func newMkdirs() *cobra.Command { cmd.Args = func(cmd *cobra.Command, args []string) error { if cmd.Flags().Changed("json") { - err := cobra.ExactArgs(0)(cmd, args) + err := root.ExactArgs(0)(cmd, args) if err != nil { return fmt.Errorf("when --json flag is specified, no positional arguments are required. 
Provide 'path' in your JSON input") } @@ -699,12 +666,6 @@ func newMkdirs() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newMkdirs()) - }) -} - // start set-permissions command // Slice with functions to override default command behavior. @@ -739,7 +700,7 @@ func newSetPermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -776,12 +737,6 @@ func newSetPermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newSetPermissions()) - }) -} - // start update-permissions command // Slice with functions to override default command behavior. @@ -816,7 +771,7 @@ func newUpdatePermissions() *cobra.Command { cmd.Annotations = make(map[string]string) cmd.Args = func(cmd *cobra.Command, args []string) error { - check := cobra.ExactArgs(2) + check := root.ExactArgs(2) return check(cmd, args) } @@ -853,10 +808,4 @@ func newUpdatePermissions() *cobra.Command { return cmd } -func init() { - cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) { - cmd.AddCommand(newUpdatePermissions()) - }) -} - // end service Workspace diff --git a/docker/config.tfrc b/docker/config.tfrc new file mode 100644 index 000000000..123f6d639 --- /dev/null +++ b/docker/config.tfrc @@ -0,0 +1,6 @@ +provider_installation { + filesystem_mirror { + path = "/app/providers" + include = ["registry.terraform.io/databricks/databricks"] + } +} diff --git a/docker/setup.sh b/docker/setup.sh new file mode 100755 index 000000000..0dc06ce1e --- /dev/null +++ b/docker/setup.sh @@ -0,0 +1,32 @@ +#!/bin/bash +set -euo pipefail + +DATABRICKS_TF_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.version) +DATABRICKS_TF_PROVIDER_VERSION=$(/app/databricks bundle debug terraform --output json | jq -r .terraform.providerVersion) + +if [ $ARCH != "amd64" ] && [ $ARCH != "arm64" ]; then + echo "Unsupported architecture: $ARCH" + exit 1 +fi + +# Download the terraform binary +mkdir -p zip +wget https://releases.hashicorp.com/terraform/${DATABRICKS_TF_VERSION}/terraform_${DATABRICKS_TF_VERSION}_linux_${ARCH}.zip -O zip/terraform.zip + +# Verify the checksum. This is to ensure that the downloaded archive is not tampered with. +EXPECTED_CHECKSUM="$(/app/databricks bundle debug terraform --output json | jq -r .terraform.checksum.linux_$ARCH)" +COMPUTED_CHECKSUM=$(sha256sum zip/terraform.zip | awk '{ print $1 }') +if [ "$COMPUTED_CHECKSUM" != "$EXPECTED_CHECKSUM" ]; then + echo "Checksum mismatch for Terraform binary. Version: $DATABRICKS_TF_VERSION, Arch: $ARCH, Expected checksum: $EXPECTED_CHECKSUM, Computed checksum: $COMPUTED_CHECKSUM." + exit 1 +fi + +# Unzip the terraform binary. It's safe to do so because we have already verified the checksum. 
+unzip zip/terraform.zip -d zip/terraform +mkdir -p /app/bin +mv zip/terraform/terraform /app/bin/terraform + +# Download the provider plugin +TF_PROVIDER_NAME=terraform-provider-databricks_${DATABRICKS_TF_PROVIDER_VERSION}_linux_${ARCH}.zip +mkdir -p /app/providers/registry.terraform.io/databricks/databricks +wget https://github.com/databricks/terraform-provider-databricks/releases/download/v${DATABRICKS_TF_PROVIDER_VERSION}/${TF_PROVIDER_NAME} -O /app/providers/registry.terraform.io/databricks/databricks/${TF_PROVIDER_NAME} diff --git a/docs/commands.md b/docs/commands.md deleted file mode 100755 index 701e8efbe..000000000 --- a/docs/commands.md +++ /dev/null @@ -1,4216 +0,0 @@ -# Available `databricks` commands - -- [databricks alerts - The alerts API can be used to perform CRUD operations on alerts.](#databricks-alerts---the-alerts-api-can-be-used-to-perform-crud-operations-on-alerts) - - [databricks alerts create - Create an alert.](#databricks-alerts-create---create-an-alert) - - [databricks alerts delete - Delete an alert.](#databricks-alerts-delete---delete-an-alert) - - [databricks alerts get - Get an alert.](#databricks-alerts-get---get-an-alert) - - [databricks alerts list - Get alerts.](#databricks-alerts-list---get-alerts) - - [databricks alerts update - Update an alert.](#databricks-alerts-update---update-an-alert) -- [databricks catalogs - A catalog is the first layer of Unity Catalog’s three-level namespace.](#databricks-catalogs---a-catalog-is-the-first-layer-of-unity-catalogs-three-level-namespace) - - [databricks catalogs create - Create a catalog.](#databricks-catalogs-create---create-a-catalog) - - [databricks catalogs delete - Delete a catalog.](#databricks-catalogs-delete---delete-a-catalog) - - [databricks catalogs get - Get a catalog.](#databricks-catalogs-get---get-a-catalog) - - [databricks catalogs list - List catalogs.](#databricks-catalogs-list---list-catalogs) - - [databricks catalogs update - Update a catalog.](#databricks-catalogs-update---update-a-catalog) -- [databricks cluster-policies - Cluster policy limits the ability to configure clusters based on a set of rules.](#databricks-cluster-policies---cluster-policy-limits-the-ability-to-configure-clusters-based-on-a-set-of-rules) - - [databricks cluster-policies create - Create a new policy.](#databricks-cluster-policies-create---create-a-new-policy) - - [databricks cluster-policies delete - Delete a cluster policy.](#databricks-cluster-policies-delete---delete-a-cluster-policy) - - [databricks cluster-policies edit - Update a cluster policy.](#databricks-cluster-policies-edit---update-a-cluster-policy) - - [databricks cluster-policies get - Get entity.](#databricks-cluster-policies-get---get-entity) - - [databricks cluster-policies list - Get a cluster policy.](#databricks-cluster-policies-list---get-a-cluster-policy) -- [databricks clusters - The Clusters API allows you to create, start, edit, list, terminate, and delete clusters.](#databricks-clusters---the-clusters-api-allows-you-to-create-start-edit-list-terminate-and-delete-clusters) - - [databricks clusters change-owner - Change cluster owner.](#databricks-clusters-change-owner---change-cluster-owner) - - [databricks clusters create - Create new cluster.](#databricks-clusters-create---create-new-cluster) - - [databricks clusters delete - Terminate cluster.](#databricks-clusters-delete---terminate-cluster) - - [databricks clusters edit - Update cluster configuration.](#databricks-clusters-edit---update-cluster-configuration) - - [databricks clusters 
events - List cluster activity events.](#databricks-clusters-events---list-cluster-activity-events) - - [databricks clusters get - Get cluster info.](#databricks-clusters-get---get-cluster-info) - - [databricks clusters list - List all clusters.](#databricks-clusters-list---list-all-clusters) - - [databricks clusters list-node-types - List node types.](#databricks-clusters-list-node-types---list-node-types) - - [databricks clusters list-zones - List availability zones.](#databricks-clusters-list-zones---list-availability-zones) - - [databricks clusters permanent-delete - Permanently delete cluster.](#databricks-clusters-permanent-delete---permanently-delete-cluster) - - [databricks clusters pin - Pin cluster.](#databricks-clusters-pin---pin-cluster) - - [databricks clusters resize - Resize cluster.](#databricks-clusters-resize---resize-cluster) - - [databricks clusters restart - Restart cluster.](#databricks-clusters-restart---restart-cluster) - - [databricks clusters spark-versions - List available Spark versions.](#databricks-clusters-spark-versions---list-available-spark-versions) - - [databricks clusters start - Start terminated cluster.](#databricks-clusters-start---start-terminated-cluster) - - [databricks clusters unpin - Unpin cluster.](#databricks-clusters-unpin---unpin-cluster) -- [databricks account credentials - These commands manage credential configurations for this workspace.](#databricks-account-credentials---these-commands-manage-credential-configurations-for-this-workspace) - - [databricks account credentials create - Create credential configuration.](#databricks-account-credentials-create---create-credential-configuration) - - [databricks account credentials delete - Delete credential configuration.](#databricks-account-credentials-delete---delete-credential-configuration) - - [databricks account credentials get - Get credential configuration.](#databricks-account-credentials-get---get-credential-configuration) - - [databricks account credentials list - Get all credential configurations.](#databricks-account-credentials-list---get-all-credential-configurations) -- [databricks current-user - command allows retrieving information about currently authenticated user or service principal.](#databricks-current-user---command-allows-retrieving-information-about-currently-authenticated-user-or-service-principal) - - [databricks current-user me - Get current user info.](#databricks-current-user-me---get-current-user-info) -- [databricks account custom-app-integration - manage custom oauth app integrations.](#databricks-account-custom-app-integration---manage-custom-oauth-app-integrations) - - [databricks account custom-app-integration create - Create Custom OAuth App Integration.](#databricks-account-custom-app-integration-create---create-custom-oauth-app-integration) - - [databricks account custom-app-integration delete - Delete Custom OAuth App Integration.](#databricks-account-custom-app-integration-delete---delete-custom-oauth-app-integration) - - [databricks account custom-app-integration get - Get OAuth Custom App Integration.](#databricks-account-custom-app-integration-get---get-oauth-custom-app-integration) - - [databricks account custom-app-integration list - Get custom oauth app integrations.](#databricks-account-custom-app-integration-list---get-custom-oauth-app-integrations) - - [databricks account custom-app-integration update - Updates Custom OAuth App Integration.](#databricks-account-custom-app-integration-update---updates-custom-oauth-app-integration) -- 
[databricks dashboards - Databricks SQL Dashboards](#databricks-dashboards---databricks-sql-dashboards) - - [databricks dashboards create - Create a dashboard object.](#databricks-dashboards-create---create-a-dashboard-object) - - [databricks dashboards delete - Remove a dashboard.](#databricks-dashboards-delete---remove-a-dashboard) - - [databricks dashboards get - Retrieve a definition.](#databricks-dashboards-get---retrieve-a-definition) - - [databricks dashboards list - Get dashboard objects.](#databricks-dashboards-list---get-dashboard-objects) - - [databricks dashboards restore - Restore a dashboard.](#databricks-dashboards-restore---restore-a-dashboard) -- [databricks data-sources - command is provided to assist you in making new query objects.](#databricks-data-sources---command-is-provided-to-assist-you-in-making-new-query-objects) - - [databricks data-sources list - Get a list of SQL warehouses.](#databricks-data-sources-list---get-a-list-of-sql-warehouses) -- [databricks account encryption-keys - manage encryption key configurations.](#databricks-account-encryption-keys---manage-encryption-key-configurations) - - [databricks account encryption-keys create - Create encryption key configuration.](#databricks-account-encryption-keys-create---create-encryption-key-configuration) - - [databricks account encryption-keys delete - Delete encryption key configuration.](#databricks-account-encryption-keys-delete---delete-encryption-key-configuration) - - [databricks account encryption-keys get - Get encryption key configuration.](#databricks-account-encryption-keys-get---get-encryption-key-configuration) - - [databricks account encryption-keys list - Get all encryption key configurations.](#databricks-account-encryption-keys-list---get-all-encryption-key-configurations) -- [databricks experiments - Manage MLflow experiments](#databricks-experiments---manage-mlflow-experiments) - - [databricks experiments create-experiment - Create experiment.](#databricks-experiments-create-experiment---create-experiment) - - [databricks experiments create-run - Create a run.](#databricks-experiments-create-run---create-a-run) - - [databricks experiments delete-experiment - Delete an experiment.](#databricks-experiments-delete-experiment---delete-an-experiment) - - [databricks experiments delete-run - Delete a run.](#databricks-experiments-delete-run---delete-a-run) - - [databricks experiments delete-tag - Delete a tag.](#databricks-experiments-delete-tag---delete-a-tag) - - [databricks experiments get-by-name - Get metadata.](#databricks-experiments-get-by-name---get-metadata) - - [databricks experiments get-experiment - Get an experiment.](#databricks-experiments-get-experiment---get-an-experiment) - - [databricks experiments get-history - Get history of a given metric within a run.](#databricks-experiments-get-history---get-history-of-a-given-metric-within-a-run) - - [databricks experiments get-run - Get a run.](#databricks-experiments-get-run---get-a-run) - - [databricks experiments list-artifacts - Get all artifacts.](#databricks-experiments-list-artifacts---get-all-artifacts) - - [databricks experiments list-experiments - List experiments.](#databricks-experiments-list-experiments---list-experiments) - - [databricks experiments log-batch - Log a batch.](#databricks-experiments-log-batch---log-a-batch) - - [databricks experiments log-metric - Log a metric.](#databricks-experiments-log-metric---log-a-metric) - - [databricks experiments log-model - Log a 
model.](#databricks-experiments-log-model---log-a-model) - - [databricks experiments log-param - Log a param.](#databricks-experiments-log-param---log-a-param) - - [databricks experiments restore-experiment - Restores an experiment.](#databricks-experiments-restore-experiment---restores-an-experiment) - - [databricks experiments restore-run - Restore a run.](#databricks-experiments-restore-run---restore-a-run) - - [databricks experiments search-experiments - Search experiments.](#databricks-experiments-search-experiments---search-experiments) - - [databricks experiments search-runs - Search for runs.](#databricks-experiments-search-runs---search-for-runs) - - [databricks experiments set-experiment-tag - Set a tag.](#databricks-experiments-set-experiment-tag---set-a-tag) - - [databricks experiments set-tag - Set a tag.](#databricks-experiments-set-tag---set-a-tag) - - [databricks experiments update-experiment - Update an experiment.](#databricks-experiments-update-experiment---update-an-experiment) - - [databricks experiments update-run - Update a run.](#databricks-experiments-update-run---update-a-run) -- [databricks external-locations - manage cloud storage path with a storage credential that authorizes access to it.](#databricks-external-locations---manage-cloud-storage-path-with-a-storage-credential-that-authorizes-access-to-it) - - [databricks external-locations create - Create an external location.](#databricks-external-locations-create---create-an-external-location) - - [databricks external-locations delete - Delete an external location.](#databricks-external-locations-delete---delete-an-external-location) - - [databricks external-locations get - Get an external location.](#databricks-external-locations-get---get-an-external-location) - - [databricks external-locations list - List external locations.](#databricks-external-locations-list---list-external-locations) - - [databricks external-locations update - Update an external location.](#databricks-external-locations-update---update-an-external-location) -- [databricks functions - Functions implement User-Defined Functions UDFs in Unity Catalog.](#databricks-functions---functions-implement-user-defined-functions-udfs-in-unity-catalog) - - [databricks functions create - Create a function.](#databricks-functions-create---create-a-function) - - [databricks functions delete - Delete a function.](#databricks-functions-delete---delete-a-function) - - [databricks functions get - Get a function.](#databricks-functions-get---get-a-function) - - [databricks functions list - List functions.](#databricks-functions-list---list-functions) - - [databricks functions update - Update a function.](#databricks-functions-update---update-a-function) -- [databricks git-credentials - Registers personal access token for Databricks to do operations on behalf of the user.](#databricks-git-credentials---registers-personal-access-token-for-databricks-to-do-operations-on-behalf-of-the-user) - - [databricks git-credentials create - Create a credential entry.](#databricks-git-credentials-create---create-a-credential-entry) - - [databricks git-credentials delete - Delete a credential.](#databricks-git-credentials-delete---delete-a-credential) - - [databricks git-credentials get - Get a credential entry.](#databricks-git-credentials-get---get-a-credential-entry) - - [databricks git-credentials list - Get Git credentials.](#databricks-git-credentials-list---get-git-credentials) - - [databricks git-credentials update - Update a 
credential.](#databricks-git-credentials-update---update-a-credential) -- [databricks global-init-scripts - configure global initialization scripts for the workspace.](#databricks-global-init-scripts---configure-global-initialization-scripts-for-the-workspace) - - [databricks global-init-scripts create - Create init script.](#databricks-global-init-scripts-create---create-init-script) - - [databricks global-init-scripts delete - Delete init script.](#databricks-global-init-scripts-delete---delete-init-script) - - [databricks global-init-scripts get - Get an init script.](#databricks-global-init-scripts-get---get-an-init-script) - - [databricks global-init-scripts list - Get init scripts.](#databricks-global-init-scripts-list---get-init-scripts) - - [databricks global-init-scripts update - Update init script.](#databricks-global-init-scripts-update---update-init-script) -- [databricks grants - Manage data access in Unity Catalog.](#databricks-grants---manage-data-access-in-unity-catalog) - - [databricks grants get - Get permissions.](#databricks-grants-get---get-permissions) - - [databricks grants get-effective - Get effective permissions.](#databricks-grants-get-effective---get-effective-permissions) - - [databricks grants update - Update permissions.](#databricks-grants-update---update-permissions) -- [databricks groups - Groups for identity management.](#databricks-groups---groups-for-identity-management) - - [databricks groups create - Create a new group.](#databricks-groups-create---create-a-new-group) - - [databricks groups delete - Delete a group.](#databricks-groups-delete---delete-a-group) - - [databricks groups get - Get group details.](#databricks-groups-get---get-group-details) - - [databricks groups list - List group details.](#databricks-groups-list---list-group-details) - - [databricks groups patch - Update group details.](#databricks-groups-patch---update-group-details) - - [databricks groups update - Replace a group.](#databricks-groups-update---replace-a-group) -- [databricks account groups - Account-level group management](#databricks-account-groups---account-level-group-management) - - [databricks account groups create - Create a new group.](#databricks-account-groups-create---create-a-new-group) - - [databricks account groups delete - Delete a group.](#databricks-account-groups-delete---delete-a-group) - - [databricks account groups get - Get group details.](#databricks-account-groups-get---get-group-details) - - [databricks account groups list - List group details.](#databricks-account-groups-list---list-group-details) - - [databricks account groups patch - Update group details.](#databricks-account-groups-patch---update-group-details) - - [databricks account groups update - Replace a group.](#databricks-account-groups-update---replace-a-group) -- [databricks instance-pools - manage ready-to-use cloud instances which reduces a cluster start and auto-scaling times.](#databricks-instance-pools---manage-ready-to-use-cloud-instances-which-reduces-a-cluster-start-and-auto-scaling-times) - - [databricks instance-pools create - Create a new instance pool.](#databricks-instance-pools-create---create-a-new-instance-pool) - - [databricks instance-pools delete - Delete an instance pool.](#databricks-instance-pools-delete---delete-an-instance-pool) - - [databricks instance-pools edit - Edit an existing instance pool.](#databricks-instance-pools-edit---edit-an-existing-instance-pool) - - [databricks instance-pools get - Get instance pool 
information.](#databricks-instance-pools-get---get-instance-pool-information) - - [databricks instance-pools list - List instance pool info.](#databricks-instance-pools-list---list-instance-pool-info) -- [databricks instance-profiles - Manage instance profiles that users can launch clusters with.](#databricks-instance-profiles---manage-instance-profiles-that-users-can-launch-clusters-with) - - [databricks instance-profiles add - Register an instance profile.](#databricks-instance-profiles-add---register-an-instance-profile) - - [databricks instance-profiles edit - Edit an instance profile.](#databricks-instance-profiles-edit---edit-an-instance-profile) - - [databricks instance-profiles list - List available instance profiles.](#databricks-instance-profiles-list---list-available-instance-profiles) - - [databricks instance-profiles remove - Remove the instance profile.](#databricks-instance-profiles-remove---remove-the-instance-profile) -- [databricks ip-access-lists - enable admins to configure IP access lists.](#databricks-ip-access-lists---enable-admins-to-configure-ip-access-lists) - - [databricks ip-access-lists create - Create access list.](#databricks-ip-access-lists-create---create-access-list) - - [databricks ip-access-lists delete - Delete access list.](#databricks-ip-access-lists-delete---delete-access-list) - - [databricks ip-access-lists get - Get access list.](#databricks-ip-access-lists-get---get-access-list) - - [databricks ip-access-lists list - Get access lists.](#databricks-ip-access-lists-list---get-access-lists) - - [databricks ip-access-lists replace - Replace access list.](#databricks-ip-access-lists-replace---replace-access-list) - - [databricks ip-access-lists update - Update access list.](#databricks-ip-access-lists-update---update-access-list) -- [databricks account ip-access-lists - The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console.](#databricks-account-ip-access-lists---the-accounts-ip-access-list-api-enables-account-admins-to-configure-ip-access-lists-for-access-to-the-account-console) - - [databricks account ip-access-lists create - Create access list.](#databricks-account-ip-access-lists-create---create-access-list) - - [databricks account ip-access-lists delete - Delete access list.](#databricks-account-ip-access-lists-delete---delete-access-list) - - [databricks account ip-access-lists get - Get IP access list.](#databricks-account-ip-access-lists-get---get-ip-access-list) - - [databricks account ip-access-lists list - Get access lists.](#databricks-account-ip-access-lists-list---get-access-lists) - - [databricks account ip-access-lists replace - Replace access list.](#databricks-account-ip-access-lists-replace---replace-access-list) - - [databricks account ip-access-lists update - Update access list.](#databricks-account-ip-access-lists-update---update-access-list) -- [databricks jobs - Manage Databricks Workflows.](#databricks-jobs---manage-databricks-workflows) - - [databricks jobs cancel-all-runs - Cancel all runs of a job.](#databricks-jobs-cancel-all-runs---cancel-all-runs-of-a-job) - - [databricks jobs cancel-run - Cancel a job run.](#databricks-jobs-cancel-run---cancel-a-job-run) - - [databricks jobs create - Create a new job.](#databricks-jobs-create---create-a-new-job) - - [databricks jobs delete - Delete a job.](#databricks-jobs-delete---delete-a-job) - - [databricks jobs delete-run - Delete a job run.](#databricks-jobs-delete-run---delete-a-job-run) - - [databricks jobs 
export-run - Export and retrieve a job run.](#databricks-jobs-export-run---export-and-retrieve-a-job-run) - - [databricks jobs get - Get a single job.](#databricks-jobs-get---get-a-single-job) - - [databricks jobs get-run - Get a single job run.](#databricks-jobs-get-run---get-a-single-job-run) - - [databricks jobs get-run-output - Get the output for a single run.](#databricks-jobs-get-run-output---get-the-output-for-a-single-run) - - [databricks jobs list - List all jobs.](#databricks-jobs-list---list-all-jobs) - - [databricks jobs list-runs - List runs for a job.](#databricks-jobs-list-runs---list-runs-for-a-job) - - [databricks jobs repair-run - Repair a job run.](#databricks-jobs-repair-run---repair-a-job-run) - - [databricks jobs reset - Overwrites all settings for a job.](#databricks-jobs-reset---overwrites-all-settings-for-a-job) - - [databricks jobs run-now - Trigger a new job run.](#databricks-jobs-run-now---trigger-a-new-job-run) - - [databricks jobs submit - Create and trigger a one-time run.](#databricks-jobs-submit---create-and-trigger-a-one-time-run) - - [databricks jobs update - Partially updates a job.](#databricks-jobs-update---partially-updates-a-job) -- [databricks libraries - Manage libraries on a cluster.](#databricks-libraries---manage-libraries-on-a-cluster) - - [databricks libraries all-cluster-statuses - Get all statuses.](#databricks-libraries-all-cluster-statuses---get-all-statuses) - - [databricks libraries cluster-status - Get status.](#databricks-libraries-cluster-status---get-status) - - [databricks libraries install - Add a library.](#databricks-libraries-install---add-a-library) - - [databricks libraries uninstall - Uninstall libraries.](#databricks-libraries-uninstall---uninstall-libraries) -- [databricks account log-delivery - These commands manage log delivery configurations for this account.](#databricks-account-log-delivery---these-commands-manage-log-delivery-configurations-for-this-account) - - [databricks account log-delivery create - Create a new log delivery configuration.](#databricks-account-log-delivery-create---create-a-new-log-delivery-configuration) - - [databricks account log-delivery get - Get log delivery configuration.](#databricks-account-log-delivery-get---get-log-delivery-configuration) - - [databricks account log-delivery list - Get all log delivery configurations.](#databricks-account-log-delivery-list---get-all-log-delivery-configurations) - - [databricks account log-delivery patch-status - Enable or disable log delivery configuration.](#databricks-account-log-delivery-patch-status---enable-or-disable-log-delivery-configuration) -- [databricks account metastore-assignments - These commands manage metastore assignments to a workspace.](#databricks-account-metastore-assignments---these-commands-manage-metastore-assignments-to-a-workspace) - - [databricks account metastore-assignments create - Assigns a workspace to a metastore.](#databricks-account-metastore-assignments-create---assigns-a-workspace-to-a-metastore) - - [databricks account metastore-assignments delete - Delete a metastore assignment.](#databricks-account-metastore-assignments-delete---delete-a-metastore-assignment) - - [databricks account metastore-assignments get - Gets the metastore assignment for a workspace.](#databricks-account-metastore-assignments-get---gets-the-metastore-assignment-for-a-workspace) - - [databricks account metastore-assignments list - Get all workspaces assigned to a 
metastore.](#databricks-account-metastore-assignments-list---get-all-workspaces-assigned-to-a-metastore) - - [databricks account metastore-assignments update - Updates a metastore assignment to a workspaces.](#databricks-account-metastore-assignments-update---updates-a-metastore-assignment-to-a-workspaces) -- [databricks metastores - Manage metastores in Unity Catalog.](#databricks-metastores---manage-metastores-in-unity-catalog) - - [databricks metastores assign - Create an assignment.](#databricks-metastores-assign---create-an-assignment) - - [databricks metastores create - Create a metastore.](#databricks-metastores-create---create-a-metastore) - - [databricks metastores current - Get metastore assignment for workspace.](#databricks-metastores-current---get-metastore-assignment-for-workspace) - - [databricks metastores delete - Delete a metastore.](#databricks-metastores-delete---delete-a-metastore) - - [databricks metastores get - Get a metastore.](#databricks-metastores-get---get-a-metastore) - - [databricks metastores list - List metastores.](#databricks-metastores-list---list-metastores) - - [databricks metastores maintenance - Enables or disables auto maintenance on the metastore.](#databricks-metastores-maintenance---enables-or-disables-auto-maintenance-on-the-metastore) - - [databricks metastores summary - Get a metastore summary.](#databricks-metastores-summary---get-a-metastore-summary) - - [databricks metastores unassign - Delete an assignment.](#databricks-metastores-unassign---delete-an-assignment) - - [databricks metastores update - Update a metastore.](#databricks-metastores-update---update-a-metastore) - - [databricks metastores update-assignment - Update an assignment.](#databricks-metastores-update-assignment---update-an-assignment) -- [databricks account metastores - These commands manage Unity Catalog metastores for an account.](#databricks-account-metastores---these-commands-manage-unity-catalog-metastores-for-an-account) - - [databricks account metastores create - Create metastore.](#databricks-account-metastores-create---create-metastore) - - [databricks account metastores delete - Delete a metastore.](#databricks-account-metastores-delete---delete-a-metastore) - - [databricks account metastores get - Get a metastore.](#databricks-account-metastores-get---get-a-metastore) - - [databricks account metastores list - Get all metastores associated with an account.](#databricks-account-metastores-list---get-all-metastores-associated-with-an-account) - - [databricks account metastores update - Update a metastore.](#databricks-account-metastores-update---update-a-metastore) -- [databricks model-registry - Expose commands for Model Registry.](#databricks-model-registry---expose-commands-for-model-registry) - - [databricks model-registry approve-transition-request - Approve transition request.](#databricks-model-registry-approve-transition-request---approve-transition-request) - - [databricks model-registry create-comment - Post a comment.](#databricks-model-registry-create-comment---post-a-comment) - - [databricks model-registry create-model - Create a model.](#databricks-model-registry-create-model---create-a-model) - - [databricks model-registry create-model-version - Create a model version.](#databricks-model-registry-create-model-version---create-a-model-version) - - [databricks model-registry create-transition-request - Make a transition request.](#databricks-model-registry-create-transition-request---make-a-transition-request) - - [databricks model-registry 
create-webhook - Create a webhook.](#databricks-model-registry-create-webhook---create-a-webhook) - - [databricks model-registry delete-comment - Delete a comment.](#databricks-model-registry-delete-comment---delete-a-comment) - - [databricks model-registry delete-model - Delete a model.](#databricks-model-registry-delete-model---delete-a-model) - - [databricks model-registry delete-model-tag - Delete a model tag.](#databricks-model-registry-delete-model-tag---delete-a-model-tag) - - [databricks model-registry delete-model-version - Delete a model version.](#databricks-model-registry-delete-model-version---delete-a-model-version) - - [databricks model-registry delete-model-version-tag - Delete a model version tag.](#databricks-model-registry-delete-model-version-tag---delete-a-model-version-tag) - - [databricks model-registry delete-transition-request - Delete a ransition request.](#databricks-model-registry-delete-transition-request---delete-a-ransition-request) - - [databricks model-registry delete-webhook - Delete a webhook.](#databricks-model-registry-delete-webhook---delete-a-webhook) - - [databricks model-registry get-latest-versions - Get the latest version.](#databricks-model-registry-get-latest-versions---get-the-latest-version) - - [databricks model-registry get-model - Get model.](#databricks-model-registry-get-model---get-model) - - [databricks model-registry get-model-version - Get a model version.](#databricks-model-registry-get-model-version---get-a-model-version) - - [databricks model-registry get-model-version-download-uri - Get a model version URI.](#databricks-model-registry-get-model-version-download-uri---get-a-model-version-uri) - - [databricks model-registry list-models - List models.](#databricks-model-registry-list-models---list-models) - - [databricks model-registry list-transition-requests - List transition requests.](#databricks-model-registry-list-transition-requests---list-transition-requests) - - [databricks model-registry list-webhooks - List registry webhooks.](#databricks-model-registry-list-webhooks---list-registry-webhooks) - - [databricks model-registry reject-transition-request - Reject a transition request.](#databricks-model-registry-reject-transition-request---reject-a-transition-request) - - [databricks model-registry rename-model - Rename a model.](#databricks-model-registry-rename-model---rename-a-model) - - [databricks model-registry search-model-versions - Searches model versions.](#databricks-model-registry-search-model-versions---searches-model-versions) - - [databricks model-registry search-models - Search models.](#databricks-model-registry-search-models---search-models) - - [databricks model-registry set-model-tag - Set a tag.](#databricks-model-registry-set-model-tag---set-a-tag) - - [databricks model-registry set-model-version-tag - Set a version tag.](#databricks-model-registry-set-model-version-tag---set-a-version-tag) - - [databricks model-registry test-registry-webhook - Test a webhook.](#databricks-model-registry-test-registry-webhook---test-a-webhook) - - [databricks model-registry transition-stage - Transition a stage.](#databricks-model-registry-transition-stage---transition-a-stage) - - [databricks model-registry update-comment - Update a comment.](#databricks-model-registry-update-comment---update-a-comment) - - [databricks model-registry update-model - Update model.](#databricks-model-registry-update-model---update-model) - - [databricks model-registry update-model-version - Update model 
version.](#databricks-model-registry-update-model-version---update-model-version) - - [databricks model-registry update-webhook - Update a webhook.](#databricks-model-registry-update-webhook---update-a-webhook) -- [databricks account networks - Manage network configurations.](#databricks-account-networks---manage-network-configurations) - - [databricks account networks create - Create network configuration.](#databricks-account-networks-create---create-network-configuration) - - [databricks account networks delete - Delete a network configuration.](#databricks-account-networks-delete---delete-a-network-configuration) - - [databricks account networks get - Get a network configuration.](#databricks-account-networks-get---get-a-network-configuration) - - [databricks account networks list - Get all network configurations.](#databricks-account-networks-list---get-all-network-configurations) -- [databricks account o-auth-enrollment - These commands enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.](#databricks-account-o-auth-enrollment---these-commands-enable-administrators-to-enroll-oauth-for-their-accounts-which-is-required-for-addingusing-any-oauth-publishedcustom-application-integration) - - [databricks account o-auth-enrollment create - Create OAuth Enrollment request.](#databricks-account-o-auth-enrollment-create---create-oauth-enrollment-request) - - [databricks account o-auth-enrollment get - Get OAuth enrollment status.](#databricks-account-o-auth-enrollment-get---get-oauth-enrollment-status) -- [databricks permissions - Manage access for various users on different objects and endpoints.](#databricks-permissions---manage-access-for-various-users-on-different-objects-and-endpoints) - - [databricks permissions get - Get object permissions.](#databricks-permissions-get---get-object-permissions) - - [databricks permissions get-permission-levels - Get permission levels.](#databricks-permissions-get-permission-levels---get-permission-levels) - - [databricks permissions set - Set permissions.](#databricks-permissions-set---set-permissions) - - [databricks permissions update - Update permission.](#databricks-permissions-update---update-permission) -- [databricks pipelines - Manage Delta Live Tables from command-line.](#databricks-pipelines---manage-delta-live-tables-from-command-line) - - [databricks pipelines create - Create a pipeline.](#databricks-pipelines-create---create-a-pipeline) - - [databricks pipelines delete - Delete a pipeline.](#databricks-pipelines-delete---delete-a-pipeline) - - [databricks pipelines get - Get a pipeline.](#databricks-pipelines-get---get-a-pipeline) - - [databricks pipelines get-update - Get a pipeline update.](#databricks-pipelines-get-update---get-a-pipeline-update) - - [databricks pipelines list-pipeline-events - List pipeline events.](#databricks-pipelines-list-pipeline-events---list-pipeline-events) - - [databricks pipelines list-pipelines - List pipelines.](#databricks-pipelines-list-pipelines---list-pipelines) - - [databricks pipelines list-updates - List pipeline updates.](#databricks-pipelines-list-updates---list-pipeline-updates) - - [databricks pipelines reset - Reset a pipeline.](#databricks-pipelines-reset---reset-a-pipeline) - - [databricks pipelines start-update - Queue a pipeline update.](#databricks-pipelines-start-update---queue-a-pipeline-update) - - [databricks pipelines stop - Stop a pipeline.](#databricks-pipelines-stop---stop-a-pipeline) - - 
[databricks pipelines update - Edit a pipeline.](#databricks-pipelines-update---edit-a-pipeline) -- [databricks policy-families - View available policy families.](#databricks-policy-families---view-available-policy-families) - - [databricks policy-families get - get cluster policy family.](#databricks-policy-families-get---get-cluster-policy-family) - - [databricks policy-families list - list policy families.](#databricks-policy-families-list---list-policy-families) -- [databricks account private-access - PrivateLink settings.](#databricks-account-private-access---privatelink-settings) - - [databricks account private-access create - Create private access settings.](#databricks-account-private-access-create---create-private-access-settings) - - [databricks account private-access delete - Delete a private access settings object.](#databricks-account-private-access-delete---delete-a-private-access-settings-object) - - [databricks account private-access get - Get a private access settings object.](#databricks-account-private-access-get---get-a-private-access-settings-object) - - [databricks account private-access list - Get all private access settings objects.](#databricks-account-private-access-list---get-all-private-access-settings-objects) - - [databricks account private-access replace - Replace private access settings.](#databricks-account-private-access-replace---replace-private-access-settings) -- [databricks providers - Delta Sharing Providers commands.](#databricks-providers---delta-sharing-providers-commands) - - [databricks providers create - Create an auth provider.](#databricks-providers-create---create-an-auth-provider) - - [databricks providers delete - Delete a provider.](#databricks-providers-delete---delete-a-provider) - - [databricks providers get - Get a provider.](#databricks-providers-get---get-a-provider) - - [databricks providers list - List providers.](#databricks-providers-list---list-providers) - - [databricks providers list-shares - List shares by Provider.](#databricks-providers-list-shares---list-shares-by-provider) - - [databricks providers update - Update a provider.](#databricks-providers-update---update-a-provider) -- [databricks account published-app-integration - manage published OAuth app integrations like Tableau Cloud for Databricks in AWS cloud.](#databricks-account-published-app-integration---manage-published-oauth-app-integrations-like-tableau-cloud-for-databricks-in-aws-cloud) - - [databricks account published-app-integration create - Create Published OAuth App Integration.](#databricks-account-published-app-integration-create---create-published-oauth-app-integration) - - [databricks account published-app-integration delete - Delete Published OAuth App Integration.](#databricks-account-published-app-integration-delete---delete-published-oauth-app-integration) - - [databricks account published-app-integration get - Get OAuth Published App Integration.](#databricks-account-published-app-integration-get---get-oauth-published-app-integration) - - [databricks account published-app-integration list - Get published oauth app integrations.](#databricks-account-published-app-integration-list---get-published-oauth-app-integrations) - - [databricks account published-app-integration update - Updates Published OAuth App Integration.](#databricks-account-published-app-integration-update---updates-published-oauth-app-integration) -- [databricks queries - These endpoints are used for CRUD operations on query 
definitions.](#databricks-queries---these-endpoints-are-used-for-crud-operations-on-query-definitions) - - [databricks queries create - Create a new query definition.](#databricks-queries-create---create-a-new-query-definition) - - [databricks queries delete - Delete a query.](#databricks-queries-delete---delete-a-query) - - [databricks queries get - Get a query definition.](#databricks-queries-get---get-a-query-definition) - - [databricks queries list - Get a list of queries.](#databricks-queries-list---get-a-list-of-queries) - - [databricks queries restore - Restore a query.](#databricks-queries-restore---restore-a-query) - - [databricks queries update - Change a query definition.](#databricks-queries-update---change-a-query-definition) -- [databricks query-history - Access the history of queries through SQL warehouses.](#databricks-query-history---access-the-history-of-queries-through-sql-warehouses) - - [databricks query-history list - List Queries.](#databricks-query-history-list---list-queries) -- [databricks recipient-activation - Delta Sharing recipient activation commands.](#databricks-recipient-activation---delta-sharing-recipient-activation-commands) - - [databricks recipient-activation get-activation-url-info - Get a share activation URL.](#databricks-recipient-activation-get-activation-url-info---get-a-share-activation-url) - - [databricks recipient-activation retrieve-token - Get an access token.](#databricks-recipient-activation-retrieve-token---get-an-access-token) -- [databricks recipients - Delta Sharing recipients.](#databricks-recipients---delta-sharing-recipients) - - [databricks recipients create - Create a share recipient.](#databricks-recipients-create---create-a-share-recipient) - - [databricks recipients delete - Delete a share recipient.](#databricks-recipients-delete---delete-a-share-recipient) - - [databricks recipients get - Get a share recipient.](#databricks-recipients-get---get-a-share-recipient) - - [databricks recipients list - List share recipients.](#databricks-recipients-list---list-share-recipients) - - [databricks recipients rotate-token - Rotate a token.](#databricks-recipients-rotate-token---rotate-a-token) - - [databricks recipients share-permissions - Get recipient share permissions.](#databricks-recipients-share-permissions---get-recipient-share-permissions) - - [databricks recipients update - Update a share recipient.](#databricks-recipients-update---update-a-share-recipient) -- [databricks repos - Manage their git repos.](#databricks-repos---manage-their-git-repos) - - [databricks repos create - Create a repo.](#databricks-repos-create---create-a-repo) - - [databricks repos delete - Delete a repo.](#databricks-repos-delete---delete-a-repo) - - [databricks repos get - Get a repo.](#databricks-repos-get---get-a-repo) - - [databricks repos list - Get repos.](#databricks-repos-list---get-repos) - - [databricks repos update - Update a repo.](#databricks-repos-update---update-a-repo) -- [databricks schemas - Manage schemas in Unity Catalog.](#databricks-schemas---manage-schemas-in-unity-catalog) - - [databricks schemas create - Create a schema.](#databricks-schemas-create---create-a-schema) - - [databricks schemas delete - Delete a schema.](#databricks-schemas-delete---delete-a-schema) - - [databricks schemas get - Get a schema.](#databricks-schemas-get---get-a-schema) - - [databricks schemas list - List schemas.](#databricks-schemas-list---list-schemas) - - [databricks schemas update - Update a schema.](#databricks-schemas-update---update-a-schema) 
-- [databricks secrets - manage secrets, secret scopes, and access permissions.](#databricks-secrets---manage-secrets-secret-scopes-and-access-permissions) - - [databricks secrets create-scope - Create a new secret scope.](#databricks-secrets-create-scope---create-a-new-secret-scope) - - [databricks secrets delete-acl - Delete an ACL.](#databricks-secrets-delete-acl---delete-an-acl) - - [databricks secrets delete-scope - Delete a secret scope.](#databricks-secrets-delete-scope---delete-a-secret-scope) - - [databricks secrets delete-secret - Delete a secret.](#databricks-secrets-delete-secret---delete-a-secret) - - [databricks secrets get-acl - Get secret ACL details.](#databricks-secrets-get-acl---get-secret-acl-details) - - [databricks secrets list-acls - Lists ACLs.](#databricks-secrets-list-acls---lists-acls) - - [databricks secrets list-scopes - List all scopes.](#databricks-secrets-list-scopes---list-all-scopes) - - [databricks secrets list-secrets - List secret keys.](#databricks-secrets-list-secrets---list-secret-keys) - - [databricks secrets put-acl - Create/update an ACL.](#databricks-secrets-put-acl---createupdate-an-acl) - - [databricks secrets put-secret - Add a secret.](#databricks-secrets-put-secret---add-a-secret) -- [databricks service-principals - Manage service principals.](#databricks-service-principals---manage-service-principals) - - [databricks service-principals create - Create a service principal.](#databricks-service-principals-create---create-a-service-principal) - - [databricks service-principals delete - Delete a service principal.](#databricks-service-principals-delete---delete-a-service-principal) - - [databricks service-principals get - Get service principal details.](#databricks-service-principals-get---get-service-principal-details) - - [databricks service-principals list - List service principals.](#databricks-service-principals-list---list-service-principals) - - [databricks service-principals patch - Update service principal details.](#databricks-service-principals-patch---update-service-principal-details) - - [databricks service-principals update - Replace service principal.](#databricks-service-principals-update---replace-service-principal) -- [databricks account service-principals - Manage service principals on the account level.](#databricks-account-service-principals---manage-service-principals-on-the-account-level) - - [databricks account service-principals create - Create a service principal.](#databricks-account-service-principals-create---create-a-service-principal) - - [databricks account service-principals delete - Delete a service principal.](#databricks-account-service-principals-delete---delete-a-service-principal) - - [databricks account service-principals get - Get service principal details.](#databricks-account-service-principals-get---get-service-principal-details) - - [databricks account service-principals list - List service principals.](#databricks-account-service-principals-list---list-service-principals) - - [databricks account service-principals patch - Update service principal details.](#databricks-account-service-principals-patch---update-service-principal-details) - - [databricks account service-principals update - Replace service principal.](#databricks-account-service-principals-update---replace-service-principal) -- [databricks serving-endpoints - Manage model serving endpoints.](#databricks-serving-endpoints---manage-model-serving-endpoints) - - [databricks serving-endpoints build-logs - Retrieve the logs associated with 
building the model's environment for a given serving endpoint's served model.](#databricks-serving-endpoints-build-logs---retrieve-the-logs-associated-with-building-the-models-environment-for-a-given-serving-endpoints-served-model) - - [databricks serving-endpoints create - Create a new serving endpoint.](#databricks-serving-endpoints-create---create-a-new-serving-endpoint) - - [databricks serving-endpoints delete - Delete a serving endpoint.](#databricks-serving-endpoints-delete---delete-a-serving-endpoint) - - [databricks serving-endpoints export-metrics - Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format.](#databricks-serving-endpoints-export-metrics---retrieve-the-metrics-corresponding-to-a-serving-endpoint-for-the-current-time-in-prometheus-or-openmetrics-exposition-format) - - [databricks serving-endpoints get - Get a single serving endpoint.](#databricks-serving-endpoints-get---get-a-single-serving-endpoint) - - [databricks serving-endpoints list - Retrieve all serving endpoints.](#databricks-serving-endpoints-list---retrieve-all-serving-endpoints) - - [databricks serving-endpoints logs - Retrieve the most recent log lines associated with a given serving endpoint's served model.](#databricks-serving-endpoints-logs---retrieve-the-most-recent-log-lines-associated-with-a-given-serving-endpoints-served-model) - - [databricks serving-endpoints query - Query a serving endpoint with provided model input.](#databricks-serving-endpoints-query---query-a-serving-endpoint-with-provided-model-input) - - [databricks serving-endpoints update-config - Update a serving endpoint with a new config.](#databricks-serving-endpoints-update-config---update-a-serving-endpoint-with-a-new-config) -- [databricks shares - Databricks Shares commands.](#databricks-shares---databricks-shares-commands) - - [databricks shares create - Create a share.](#databricks-shares-create---create-a-share) - - [databricks shares delete - Delete a share.](#databricks-shares-delete---delete-a-share) - - [databricks shares get - Get a share.](#databricks-shares-get---get-a-share) - - [databricks shares list - List shares.](#databricks-shares-list---list-shares) - - [databricks shares share-permissions - Get permissions.](#databricks-shares-share-permissions---get-permissions) - - [databricks shares update - Update a share.](#databricks-shares-update---update-a-share) - - [databricks shares update-permissions - Update permissions.](#databricks-shares-update-permissions---update-permissions) -- [databricks account storage - Manage storage configurations for this workspace.](#databricks-account-storage---manage-storage-configurations-for-this-workspace) - - [databricks account storage create - Create new storage configuration.](#databricks-account-storage-create---create-new-storage-configuration) - - [databricks account storage delete - Delete storage configuration.](#databricks-account-storage-delete---delete-storage-configuration) - - [databricks account storage get - Get storage configuration.](#databricks-account-storage-get---get-storage-configuration) - - [databricks account storage list - Get all storage configurations.](#databricks-account-storage-list---get-all-storage-configurations) -- [databricks storage-credentials - Manage storage credentials for Unity Catalog.](#databricks-storage-credentials---manage-storage-credentials-for-unity-catalog) - - [databricks storage-credentials create - Create a storage 
credential.](#databricks-storage-credentials-create---create-a-storage-credential) - - [databricks storage-credentials delete - Delete a credential.](#databricks-storage-credentials-delete---delete-a-credential) - - [databricks storage-credentials get - Get a credential.](#databricks-storage-credentials-get---get-a-credential) - - [databricks storage-credentials list - List credentials.](#databricks-storage-credentials-list---list-credentials) - - [databricks storage-credentials update - Update a credential.](#databricks-storage-credentials-update---update-a-credential) - - [databricks storage-credentials validate - Validate a storage credential.](#databricks-storage-credentials-validate---validate-a-storage-credential) -- [databricks account storage-credentials - These commands manage storage credentials for a particular metastore.](#databricks-account-storage-credentials---these-commands-manage-storage-credentials-for-a-particular-metastore) - - [databricks account storage-credentials create - Create a storage credential.](#databricks-account-storage-credentials-create---create-a-storage-credential) - - [databricks account storage-credentials get - Gets the named storage credential.](#databricks-account-storage-credentials-get---gets-the-named-storage-credential) - - [databricks account storage-credentials list - Get all storage credentials assigned to a metastore.](#databricks-account-storage-credentials-list---get-all-storage-credentials-assigned-to-a-metastore) -- [databricks table-constraints - Primary key and foreign key constraints encode relationships between fields in tables.](#databricks-table-constraints---primary-key-and-foreign-key-constraints-encode-relationships-between-fields-in-tables) - - [databricks table-constraints create - Create a table constraint.](#databricks-table-constraints-create---create-a-table-constraint) - - [databricks table-constraints delete - Delete a table constraint.](#databricks-table-constraints-delete---delete-a-table-constraint) -- [databricks tables - A table resides in the third layer of Unity Catalog’s three-level namespace.](#databricks-tables---a-table-resides-in-the-third-layer-of-unity-catalogs-three-level-namespace) - - [databricks tables delete - Delete a table.](#databricks-tables-delete---delete-a-table) - - [databricks tables get - Get a table.](#databricks-tables-get---get-a-table) - - [databricks tables list - List tables.](#databricks-tables-list---list-tables) - - [databricks tables list-summaries - List table summaries.](#databricks-tables-list-summaries---list-table-summaries) -- [databricks token-management - Enables administrators to get all tokens and delete tokens for other users.](#databricks-token-management---enables-administrators-to-get-all-tokens-and-delete-tokens-for-other-users) - - [databricks token-management create-obo-token - Create on-behalf token.](#databricks-token-management-create-obo-token---create-on-behalf-token) - - [databricks token-management delete - Delete a token.](#databricks-token-management-delete---delete-a-token) - - [databricks token-management get - Get token info.](#databricks-token-management-get---get-token-info) - - [databricks token-management list - List all tokens.](#databricks-token-management-list---list-all-tokens) -- [databricks tokens - The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks 
commandss.](#databricks-tokens---the-token-api-allows-you-to-create-list-and-revoke-tokens-that-can-be-used-to-authenticate-and-access-databricks-commandss) - - [databricks tokens create - Create a user token.](#databricks-tokens-create---create-a-user-token) - - [databricks tokens delete - Revoke token.](#databricks-tokens-delete---revoke-token) - - [databricks tokens list - List tokens.](#databricks-tokens-list---list-tokens) -- [databricks users - Manage users on the workspace-level.](#databricks-users---manage-users-on-the-workspace-level) - - [databricks users create - Create a new user.](#databricks-users-create---create-a-new-user) - - [databricks users delete - Delete a user.](#databricks-users-delete---delete-a-user) - - [databricks users get - Get user details.](#databricks-users-get---get-user-details) - - [databricks users list - List users.](#databricks-users-list---list-users) - - [databricks users patch - Update user details.](#databricks-users-patch---update-user-details) - - [databricks users update - Replace a user.](#databricks-users-update---replace-a-user) -- [databricks account users - Manage users on the accou](#databricks-account-users---manage-users-on-the-accou) - - [databricks account users create - Create a new user.](#databricks-account-users-create---create-a-new-user) - - [databricks account users delete - Delete a user.](#databricks-account-users-delete---delete-a-user) - - [databricks account users get - Get user details.](#databricks-account-users-get---get-user-details) - - [databricks account users list - List users.](#databricks-account-users-list---list-users) - - [databricks account users patch - Update user details.](#databricks-account-users-patch---update-user-details) - - [databricks account users update - Replace a user.](#databricks-account-users-update---replace-a-user) -- [databricks account vpc-endpoints - Manage VPC endpoints.](#databricks-account-vpc-endpoints---manage-vpc-endpoints) - - [databricks account vpc-endpoints create - Create VPC endpoint configuration.](#databricks-account-vpc-endpoints-create---create-vpc-endpoint-configuration) - - [databricks account vpc-endpoints delete - Delete VPC endpoint configuration.](#databricks-account-vpc-endpoints-delete---delete-vpc-endpoint-configuration) - - [databricks account vpc-endpoints get - Get a VPC endpoint configuration.](#databricks-account-vpc-endpoints-get---get-a-vpc-endpoint-configuration) - - [databricks account vpc-endpoints list - Get all VPC endpoint configurations.](#databricks-account-vpc-endpoints-list---get-all-vpc-endpoint-configurations) -- [databricks warehouses - Manage Databricks SQL warehouses.](#databricks-warehouses---manage-databricks-sql-warehouses) - - [databricks warehouses create - Create a warehouse.](#databricks-warehouses-create---create-a-warehouse) - - [databricks warehouses delete - Delete a warehouse.](#databricks-warehouses-delete---delete-a-warehouse) - - [databricks warehouses edit - Update a warehouse.](#databricks-warehouses-edit---update-a-warehouse) - - [databricks warehouses get - Get warehouse info.](#databricks-warehouses-get---get-warehouse-info) - - [databricks warehouses get-workspace-warehouse-config - Get the workspace configuration.](#databricks-warehouses-get-workspace-warehouse-config---get-the-workspace-configuration) - - [databricks warehouses list - List warehouses.](#databricks-warehouses-list---list-warehouses) - - [databricks warehouses set-workspace-warehouse-config - Set the workspace 
configuration.](#databricks-warehouses-set-workspace-warehouse-config---set-the-workspace-configuration) - - [databricks warehouses start - Start a warehouse.](#databricks-warehouses-start---start-a-warehouse) - - [databricks warehouses stop - Stop a warehouse.](#databricks-warehouses-stop---stop-a-warehouse) -- [databricks workspace - The Workspace API allows you to list, import, export, and delete notebooks and folders.](#databricks-workspace---the-workspace-api-allows-you-to-list-import-export-and-delete-notebooks-and-folders) - - [databricks workspace delete - Delete a workspace object.](#databricks-workspace-delete---delete-a-workspace-object) - - [databricks workspace export - Export a workspace object.](#databricks-workspace-export---export-a-workspace-object) - - [databricks workspace get-status - Get status.](#databricks-workspace-get-status---get-status) - - [databricks workspace import - Import a workspace object.](#databricks-workspace-import---import-a-workspace-object) - - [databricks workspace list - List contents.](#databricks-workspace-list---list-contents) - - [databricks workspace mkdirs - Create a directory.](#databricks-workspace-mkdirs---create-a-directory) -- [databricks account workspace-assignment - The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account.](#databricks-account-workspace-assignment---the-workspace-permission-assignment-api-allows-you-to-manage-workspace-permissions-for-principals-in-your-account) - - [databricks account workspace-assignment delete - Delete permissions assignment.](#databricks-account-workspace-assignment-delete---delete-permissions-assignment) - - [databricks account workspace-assignment get - List workspace permissions.](#databricks-account-workspace-assignment-get---list-workspace-permissions) - - [databricks account workspace-assignment list - Get permission assignments.](#databricks-account-workspace-assignment-list---get-permission-assignments) - - [databricks account workspace-assignment update - Create or update permissions assignment.](#databricks-account-workspace-assignment-update---create-or-update-permissions-assignment) -- [databricks workspace-conf - command allows updating known workspace settings for advanced users.](#databricks-workspace-conf---command-allows-updating-known-workspace-settings-for-advanced-users) - - [databricks workspace-conf get-status - Check configuration status.](#databricks-workspace-conf-get-status---check-configuration-status) - - [databricks workspace-conf set-status - Enable/disable features.](#databricks-workspace-conf-set-status---enabledisable-features) -- [databricks account workspaces - These commands manage workspaces for this account.](#databricks-account-workspaces---these-commands-manage-workspaces-for-this-account) - - [databricks account workspaces create - Create a new workspace.](#databricks-account-workspaces-create---create-a-new-workspace) - - [databricks account workspaces delete - Delete a workspace.](#databricks-account-workspaces-delete---delete-a-workspace) - - [databricks account workspaces get - Get a workspace.](#databricks-account-workspaces-get---get-a-workspace) - - [databricks account workspaces list - Get all workspaces.](#databricks-account-workspaces-list---get-all-workspaces) - - [databricks account workspaces update - Update workspace configuration.](#databricks-account-workspaces-update---update-workspace-configuration) - - -## `databricks alerts` - The alerts API can be used to perform CRUD 
operations on alerts. - -The alerts API can be used to perform CRUD operations on alerts. An alert is a Databricks SQL -object that periodically runs a query, evaluates a condition of its result, and notifies one -or more users and/or notification destinations if the condition was met. - -### `databricks alerts create` - Create an alert. - -An alert is a Databricks SQL object that periodically runs a query, evaluates a condition of its result, -and notifies users or notification destinations if the condition was met. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--parent` - The identifier of the workspace folder containing the alert. - * `--rearm` - Number of seconds after being triggered before the alert rearms itself and can be triggered again. - -### `databricks alerts delete` - Delete an alert. - -Deletes an alert. Deleted alerts are no longer accessible and cannot be restored. -**Note:** Unlike queries and dashboards, alerts cannot be moved to the trash. - -### `databricks alerts get` - Get an alert. - -Gets an alert. - -### `databricks alerts list` - Get alerts. - -Gets a list of alerts. - -### `databricks alerts update` - Update an alert. - -Updates an alert. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--rearm` - Number of seconds after being triggered before the alert rearms itself and can be triggered again. - -## `databricks catalogs` - A catalog is the first layer of Unity Catalog’s three-level namespace. - -A catalog is the first layer of Unity Catalog’s three-level namespace. It’s used to organize -your data assets. Users can see all catalogs on which they have been assigned the USE_CATALOG -data permission. - -In Unity Catalog, admins and data stewards manage users and their access to data centrally -across all of the workspaces in a Databricks account. Users in different workspaces can -share access to the same data, depending on privileges granted centrally in Unity Catalog. - -### `databricks catalogs create` - Create a catalog. - -Creates a new catalog instance in the parent metastore if the caller is a metastore admin or has the **CREATE_CATALOG** privilege. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - User-provided free-form text description. - * `--provider-name` - The name of the delta sharing provider. - * `--share-name` - The name of the share under the share provider. - * `--storage-root` - Storage root URL for managed tables within catalog. - -### `databricks catalogs delete` - Delete a catalog. - -Deletes the catalog that matches the supplied name. The caller must be a metastore admin or the owner of the catalog. - -Flags: - * `--force` - Force deletion even if the catalog is not empty. - -### `databricks catalogs get` - Get a catalog. - -Gets the specified catalog in a metastore. The caller must be a metastore admin, the owner of the catalog, or a user that has the **USE_CATALOG** privilege set for their account. - -### `databricks catalogs list` - List catalogs. - -Gets an array of catalogs in the metastore. -If the caller is the metastore admin, all catalogs will be retrieved. -Otherwise, only catalogs owned by the caller (or for which the caller has the **USE_CATALOG** privilege) will be retrieved. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks catalogs update` - Update a catalog. - -Updates the catalog that matches the supplied name.
-The caller must be either the owner of the catalog, or a metastore admin (when changing the owner field of the catalog). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - User-provided free-form text description. - * `--name` - Name of catalog. - * `--owner` - Username of current owner of catalog.
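For illustration, a minimal sketch of working with catalogs from the CLI. The positional catalog name is an assumption here (run `databricks catalogs create --help` to confirm the exact arguments), and `sales_catalog` and the owner address are made-up values:

```
# Create a catalog with a comment, inspect it, then hand it to a new owner.
databricks catalogs create sales_catalog --comment "Sales data assets"
databricks catalogs get sales_catalog
databricks catalogs update sales_catalog --owner "analytics-team@example.com"
```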
- -## `databricks cluster-policies` - Cluster policy limits the ability to configure clusters based on a set of rules. - -Cluster policy limits the ability to configure clusters based on a set of rules. The policy -rules limit the attributes or attribute values available for cluster creation. Cluster -policies have ACLs that limit their use to specific users and groups. - -Cluster policies let you limit users to create clusters with prescribed settings, simplify -the user interface, enable more users to create their own clusters (by fixing and hiding -some values), and control cost by limiting per-cluster maximum cost (by setting limits on -attributes whose values contribute to hourly price). - -Cluster policy permissions limit which policies a user can select in the Policy drop-down -when the user creates a cluster: -- A user who has cluster create permission can select the Unrestricted policy and create - fully-configurable clusters. -- A user who has both cluster create permission and access to cluster policies can select - the Unrestricted policy and policies they have access to. -- A user who has access only to cluster policies can select the policies they have access to. - -If no policies have been created in the workspace, the Policy drop-down does not display. - -Only admin users can create, edit, and delete policies. -Admin users also have access to all policies. - -### `databricks cluster-policies create` - Create a new policy. - -Creates a new policy with prescribed settings. - -Flags: - * `--definition` - Policy definition document expressed in Databricks Cluster Policy Definition Language. - * `--description` - Additional human-readable description of the cluster policy. - * `--max-clusters-per-user` - Max number of clusters per user that can be active using this policy. - * `--policy-family-definition-overrides` - Policy definition JSON document expressed in Databricks Policy Definition Language. - * `--policy-family-id` - ID of the policy family. - -### `databricks cluster-policies delete` - Delete a cluster policy. - -Delete a policy for a cluster. Clusters governed by this policy can still run, but cannot be edited. - -### `databricks cluster-policies edit` - Update a cluster policy. - -Update an existing policy for a cluster. This operation may make some clusters governed by the previous policy invalid. - -Flags: - * `--definition` - Policy definition document expressed in Databricks Cluster Policy Definition Language. - * `--description` - Additional human-readable description of the cluster policy. - * `--max-clusters-per-user` - Max number of clusters per user that can be active using this policy. - * `--policy-family-definition-overrides` - Policy definition JSON document expressed in Databricks Policy Definition Language. - * `--policy-family-id` - ID of the policy family. - -### `databricks cluster-policies get` - Get entity. - -Get a cluster policy entity. Creation and editing are available to admins only. - -### `databricks cluster-policies list` - Get a cluster policy. - -Returns a list of policies accessible by the requesting user. - -Flags: - * `--sort-column` - The cluster policy attribute to sort by.
- * `--sort-order` - The order in which the policies get listed.
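As a hedged sketch of the create and list commands above: the positional policy name and the `POLICY_NAME` sort value are assumptions rather than values taken from this document, and the `--definition` JSON simply pins `num_workers` to zero:

```
# Create a policy that fixes clusters to zero workers, expressed in the
# Cluster Policy Definition Language.
databricks cluster-policies create "Single Node Only" \
  --definition '{"num_workers": {"type": "fixed", "value": 0}}'

# List the policies visible to the current user, sorted by name.
databricks cluster-policies list --sort-column POLICY_NAME --sort-order ASC
```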
- -## `databricks clusters` - The Clusters API allows you to create, start, edit, list, terminate, and delete clusters. - -Databricks maps cluster node instance types to compute units known as DBUs. See the instance -type pricing page for a list of the supported instance types and their corresponding DBUs. - -A Databricks cluster is a set of computation resources and configurations on which you run -data engineering, data science, and data analytics workloads, such as production -ETL pipelines, streaming analytics, ad-hoc analytics, and machine learning. - -You run these workloads as a set of commands in a notebook or as an automated job. -Databricks makes a distinction between all-purpose clusters and job clusters. You use -all-purpose clusters to analyze data collaboratively using interactive notebooks. You use -job clusters to run fast and robust automated jobs. - -You can create an all-purpose cluster using the UI, CLI, or API. You can manually -terminate and restart an all-purpose cluster. Multiple users can share such clusters to do -collaborative interactive analysis. - -IMPORTANT: Databricks retains cluster configuration information for up to 200 all-purpose -clusters terminated in the last 30 days and up to 30 job clusters recently terminated by -the job scheduler. To keep an all-purpose cluster configuration even after it has been -terminated for more than 30 days, an administrator can pin a cluster to the cluster list. - -### `databricks clusters change-owner` - Change cluster owner. - -Change the owner of the cluster. You must be an admin to perform this operation. - -### `databricks clusters create` - Create new cluster. - -Creates a new Spark cluster. This method will acquire new instances from the cloud provider if necessary. -This method is asynchronous; the returned `cluster_id` can be used to poll the cluster status. -When this method returns, the cluster will be in a `PENDING` state. -The cluster will be usable once it enters a `RUNNING` state. - -Note: Databricks may not be able to acquire some of the requested nodes, due to cloud provider limitations -(account limits, spot price, etc.) or transient network issues. - -If Databricks acquires at least 85% of the requested on-demand nodes, cluster creation will succeed. -Otherwise the cluster will terminate with an informative error message. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--apply-policy-default-values` - Note: This field won't be true for webapp requests. - * `--autotermination-minutes` - Automatically terminates the cluster after it is inactive for this time in minutes. - * `--cluster-name` - Cluster name requested by the user. - * `--cluster-source` - Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request. - * `--driver-instance-pool-id` - The optional ID of the instance pool to which the driver of the cluster belongs. - * `--driver-node-type-id` - The node type of the Spark driver. - * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space. - * `--enable-local-disk-encryption` - Whether to enable LUKS on cluster VMs' local disks.
- * `--instance-pool-id` - The optional ID of the instance pool to which the cluster belongs. - * `--node-type-id` - This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster. - * `--num-workers` - Number of worker nodes that this cluster should have. - * `--policy-id` - The ID of the cluster policy used to create the cluster if applicable. - * `--runtime-engine` - Decides which runtime engine to use, e.g.
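For example, a sketch of creating a small all-purpose cluster with the flags above. The positional Spark version argument, the version string, and the node type are assumptions for illustration; substitute values returned by `databricks clusters spark-versions` and `databricks clusters list-node-types`:

```
# Create a two-worker cluster that auto-terminates after two idle hours.
databricks clusters create 13.3.x-scala2.12 \
  --cluster-name ad-hoc-analysis \
  --node-type-id i3.xlarge \
  --num-workers 2 \
  --autotermination-minutes 120

# Alternatively, pass the full request body as JSON.
databricks clusters create --json @cluster.json
```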
- * `--instance-pool-id` - The optional ID of the instance pool to which the cluster belongs.
- * `--node-type-id` - This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
- * `--num-workers` - Number of worker nodes that this cluster should have.
- * `--policy-id` - The ID of the cluster policy used to create the cluster if applicable.
- * `--runtime-engine` - Decides which runtime engine to use, e.g.
-
-### `databricks clusters delete` - Terminate cluster.
-
-Terminates the Spark cluster with the specified ID. The cluster is removed asynchronously.
-Once the termination has completed, the cluster will be in a `TERMINATED` state.
-If the cluster is already in a `TERMINATING` or `TERMINATED` state, nothing will happen.
-
-Flags:
- * `--no-wait` - do not wait to reach TERMINATED state.
- * `--timeout` - maximum amount of time to reach TERMINATED state.
-
-### `databricks clusters edit` - Update cluster configuration.
-
-Updates the configuration of a cluster to match the provided attributes and size.
-A cluster can be updated if it is in a `RUNNING` or `TERMINATED` state.
-
-If a cluster is updated while in a `RUNNING` state, it will be restarted so that the new attributes can take effect.
-
-If a cluster is updated while in a `TERMINATED` state, it will remain `TERMINATED`.
-The next time it is started using the `clusters/start` API, the new attributes will take effect.
-Any attempt to update a cluster in any other state will be rejected with an `INVALID_STATE` error code.
-
-Clusters created by the Databricks Jobs service cannot be edited.
-
-Flags:
- * `--no-wait` - do not wait to reach RUNNING state.
- * `--timeout` - maximum amount of time to reach RUNNING state.
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--apply-policy-default-values` - Note: This field won't be true for webapp requests.
- * `--autotermination-minutes` - Automatically terminates the cluster after it is inactive for this time in minutes.
- * `--cluster-name` - Cluster name requested by the user.
- * `--cluster-source` - Determines whether the cluster was created by a user through the UI, created by the Databricks Jobs Scheduler, or through an API request.
- * `--driver-instance-pool-id` - The optional ID of the instance pool to which the driver of the cluster belongs.
- * `--driver-node-type-id` - The node type of the Spark driver.
- * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk space when its Spark workers are running low on disk space.
- * `--enable-local-disk-encryption` - Whether to enable LUKS on cluster VMs' local disks.
- * `--instance-pool-id` - The optional ID of the instance pool to which the cluster belongs.
- * `--node-type-id` - This field encodes, through a single value, the resources available to each of the Spark nodes in this cluster.
- * `--num-workers` - Number of worker nodes that this cluster should have.
- * `--policy-id` - The ID of the cluster policy used to create the cluster if applicable.
- * `--runtime-engine` - Decides which runtime engine to use, e.g.
-
-### `databricks clusters events` - List cluster activity events.
-
-Retrieves a list of events about the activity of a cluster.
-command is paginated. If there are more events to read, the response includes all the parameters necessary to request
-the next page of events.
- -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--end-time` - The end time in epoch milliseconds. - * `--limit` - The maximum number of events to include in a page of events. - * `--offset` - The offset in the result set. - * `--order` - The order to list events in; either "ASC" or "DESC". - * `--start-time` - The start time in epoch milliseconds. - -### `databricks clusters get` - Get cluster info. - -"Retrieves the information for a cluster given its identifier. -Clusters can be described while they are running, or up to 60 days after they are terminated. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks clusters list` - List all clusters. - -Return information about all pinned clusters, active clusters, up to 200 of the most recently terminated all-purpose clusters in -the past 30 days, and up to 30 of the most recently terminated job clusters in the past 30 days. - -For example, if there is 1 pinned cluster, 4 active clusters, 45 terminated all-purpose clusters in the past 30 days, -and 50 terminated job clusters in the past 30 days, then command returns the 1 pinned cluster, 4 active clusters, -all 45 terminated all-purpose clusters, and the 30 most recently terminated job clusters. - -Flags: - * `--can-use-client` - Filter clusters based on what type of client it can be used for. - -### `databricks clusters list-node-types` - List node types. - -Returns a list of supported Spark node types. These node types can be used to launch a cluster. - -### `databricks clusters list-zones` - List availability zones. - -Returns a list of availability zones where clusters can be created in (For example, us-west-2a). -These zones can be used to launch a cluster. - -### `databricks clusters permanent-delete` - Permanently delete cluster. - -Permanently deletes a Spark cluster. This cluster is terminated and resources are asynchronously removed. - -In addition, users will no longer see permanently deleted clusters in the cluster list, and API users can no longer -perform any action on permanently deleted clusters. - -### `databricks clusters pin` - Pin cluster. - -Pinning a cluster ensures that the cluster will always be returned by the ListClusters API. -Pinning a cluster that is already pinned will have no effect. -command can only be called by workspace admins. - -### `databricks clusters resize` - Resize cluster. - -Resizes a cluster to have a desired number of workers. This will fail unless the cluster is in a `RUNNING` state. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--num-workers` - Number of worker nodes that this cluster should have. - -### `databricks clusters restart` - Restart cluster. - -Restarts a Spark cluster with the supplied ID. If the cluster is not currently in a `RUNNING` state, nothing will happen. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks clusters spark-versions` - List available Spark versions. - -Returns the list of available Spark versions. These versions can be used to launch a cluster. - -### `databricks clusters start` - Start terminated cluster. - -Starts a terminated Spark cluster with the supplied ID. 
-This works similar to `createCluster` except: - -* The previous cluster id and attributes are preserved. -* The cluster starts with the last specified cluster size. -* If the previous cluster was an autoscaling cluster, the current cluster starts with the minimum number of nodes. -* If the cluster is not currently in a `TERMINATED` state, nothing will happen. -* Clusters launched to run a job cannot be started. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks clusters unpin` - Unpin cluster. - -Unpinning a cluster will allow the cluster to eventually be removed from the ListClusters API. -Unpinning a cluster that is not pinned will have no effect. -command can only be called by workspace admins. - -## `databricks account credentials` - These commands manage credential configurations for this workspace. - -Databricks needs access to a cross-account service IAM role in your AWS account so that Databricks can deploy clusters -in the appropriate VPC for the new workspace. A credential configuration encapsulates this -role information, and its ID is used when creating a new workspace. - -### `databricks account credentials create` - Create credential configuration. - -Creates a Databricks credential configuration that represents cloud cross-account credentials for a specified account. Databricks uses this to set up network infrastructure properly to host Databricks clusters. For your AWS IAM role, you need to trust the External ID (the Databricks Account API account ID) in the returned credential object, and configure the required access policy. - -Save the response's `credentials_id` field, which is the ID for your new credential configuration object. - -For information about how to create a new workspace with command, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html) - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account credentials delete` - Delete credential configuration. - -Deletes a Databricks credential configuration object for an account, both specified by ID. You cannot delete a credential that is associated with any workspace. - -### `databricks account credentials get` - Get credential configuration. - -Gets a Databricks credential configuration object for an account, both specified by ID. - -### `databricks account credentials list` - Get all credential configurations. - -Gets all Databricks credential configurations associated with an account specified by ID. - -## `databricks current-user` - command allows retrieving information about currently authenticated user or service principal. - -**NOTE** **this command may change** - -command allows retrieving information about currently authenticated user or -service principal. - -### `databricks current-user me` - Get current user info. - -Get details about the current method caller's identity. - -## `databricks account custom-app-integration` - manage custom oauth app integrations. - -These commands enable administrators to manage custom oauth app integrations, which is required for -adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. - -**Note:** You can only add/use the OAuth custom application integrations when OAuth enrollment -status is enabled. - -### `databricks account custom-app-integration create` - Create Custom OAuth App Integration. 
- -Create Custom OAuth App Integration. - -You can retrieve the custom oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--confidential` - indicates if an oauth client-secret should be generated. - -### `databricks account custom-app-integration delete` - Delete Custom OAuth App Integration. - -Delete an existing Custom OAuth App Integration. -You can retrieve the custom oauth app integration via :method:get. - -### `databricks account custom-app-integration get` - Get OAuth Custom App Integration. - -Gets the Custom OAuth App Integration for the given integration id. - -### `databricks account custom-app-integration list` - Get custom oauth app integrations. - -Get the list of custom oauth app integrations for the specified Databricks Account - -### `databricks account custom-app-integration update` - Updates Custom OAuth App Integration. - -Updates an existing custom OAuth App Integration. -You can retrieve the custom oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks dashboards` - Databricks SQL Dashboards - -Manage SQL Dashboards from CLI. - -### `databricks dashboards create` - Create a dashboard object. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--dashboard-filters-enabled` - In the web application, query filters that share a name are coupled to a single selection box if this value is true. - * `--is-draft` - Draft dashboards only appear in list views for their owners. - * `--is-trashed` - Indicates whether the dashboard is trashed. - * `--name` - The title of this dashboard that appears in list views and at the top of the dashboard page. - * `--parent` - The identifier of the workspace folder containing the dashboard. - -### `databricks dashboards delete` - Remove a dashboard. - -Moves a dashboard to the trash. Trashed dashboards do not appear in list views or searches, and cannot be shared. - -### `databricks dashboards get` - Retrieve a definition. - -Returns a JSON representation of a dashboard object, including its visualization and query objects. - -### `databricks dashboards list` - Get dashboard objects. - -Fetch a paginated list of dashboard objects. - -Flags: - * `--order` - Name of dashboard attribute to order by. - * `--page` - Page number to retrieve. - * `--page-size` - Number of dashboards to return per page. - * `--q` - Full text search term. - -### `databricks dashboards restore` - Restore a dashboard. - -A restored dashboard appears in list views and searches and can be shared. - -## `databricks data-sources` - command is provided to assist you in making new query objects. - -command is provided to assist you in making new query objects. When creating a query object, -you may optionally specify a `data_source_id` for the SQL warehouse against which it will run. -If you don't already know the `data_source_id` for your desired SQL warehouse, command will -help you find it. - -command does not support searches. It returns the full list of SQL warehouses in your -workspace. We advise you to use any text editor, REST client, or `grep` to search the -response from command for the name of your SQL warehouse as it appears in Databricks SQL. - -### `databricks data-sources list` - Get a list of SQL warehouses. - -Retrieves a full list of SQL warehouses available in this workspace. 
-All fields that appear in command response are enumerated for clarity. -However, you need only a SQL warehouse's `id` to create new queries against it. - -## `databricks account encryption-keys` - manage encryption key configurations. - -These commands manage encryption key configurations for this workspace (optional). A key -configuration encapsulates the AWS KMS key information and some information about how -the key configuration can be used. There are two possible uses for key configurations: - -* Managed services: A key configuration can be used to encrypt a workspace's notebook and -secret data in the control plane, as well as Databricks SQL queries and query history. -* Storage: A key configuration can be used to encrypt a workspace's DBFS and EBS data in -the data plane. - -In both of these cases, the key configuration's ID is used when creating a new workspace. -This Preview feature is available if your account is on the E2 version of the platform. -Updating a running workspace with workspace storage encryption requires that the workspace -is on the E2 version of the platform. If you have an older workspace, it might not be on -the E2 version of the platform. If you are not sure, contact your Databricks representative. - -### `databricks account encryption-keys create` - Create encryption key configuration. - -Creates a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If the key is assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspaces notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for workspace storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data. - -**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account encryption-keys delete` - Delete encryption key configuration. - -Deletes a customer-managed key configuration object for an account. You cannot delete a configuration that is associated with a running workspace. - -### `databricks account encryption-keys get` - Get encryption key configuration. - -Gets a customer-managed key configuration object for an account, specified by ID. This operation uploads a reference to a customer-managed key to Databricks. If assigned as a workspace's customer-managed key for managed services, Databricks uses the key to encrypt the workspaces notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If it is specified as a workspace's customer-managed key for storage, the key encrypts the workspace's root S3 bucket (which contains the workspace's root DBFS and system data) and, optionally, cluster EBS volume data. - -**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions. - -This operation is available only if your account is on the E2 version of the platform. 
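-
-As an illustration of the `--json` flag described for the `create` command above, the request body can be supplied either inline or from a file. This is a minimal sketch; the field names and values inside the JSON body are illustrative placeholders, so check the Account API reference for the exact schema:
-
-```bash
-# Pass the request body from a file (the @ prefix is the documented convention).
-databricks account encryption-keys create --json @key-config.json
-
-# Or pass the same body as an inline JSON string.
-databricks account encryption-keys create --json '{"use_cases": ["MANAGED_SERVICES"]}'
-```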
-
-### `databricks account encryption-keys list` - Get all encryption key configurations.
-
-Gets all customer-managed key configuration objects for an account. If the key is specified as a workspace's managed services customer-managed key, Databricks uses the key to encrypt the workspace's notebooks and secrets in the control plane, in addition to Databricks SQL queries and query history. If the key is specified as a workspace's storage customer-managed key, the key is used to encrypt the workspace's root S3 bucket and optionally can encrypt cluster EBS volume data in the data plane.
-
-**Important**: Customer-managed keys are supported only for some deployment types, subscription types, and AWS regions.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-## `databricks experiments` - Manage MLflow experiments
-
-### `databricks experiments create-experiment` - Create experiment.
-
-Creates an experiment with a name. Returns the ID of the newly created experiment.
-Validates that another experiment with the same name does not already exist and fails
-if another experiment with the same name already exists.
-
-Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name exists.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--artifact-location` - Location where all artifacts for the experiment are stored.
-
-### `databricks experiments create-run` - Create a run.
-
-Creates a new run within an experiment.
-A run is usually a single execution of a machine learning or data ETL pipeline.
-MLflow uses runs to track the `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated with a single execution.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--experiment-id` - ID of the associated experiment.
- * `--start-time` - Unix timestamp in milliseconds of when the run started.
- * `--user-id` - ID of the user executing the run.
-
-### `databricks experiments delete-experiment` - Delete an experiment.
-
-Marks an experiment and associated metadata, runs, metrics, params, and tags for deletion.
-If the experiment uses FileStore, artifacts associated with the experiment are also deleted.
-
-### `databricks experiments delete-run` - Delete a run.
-
-Marks a run for deletion.
-
-### `databricks experiments delete-tag` - Delete a tag.
-
-Deletes a tag on a run. Tags are run metadata that can be updated during a run and after a run completes.
-
-### `databricks experiments get-by-name` - Get metadata.
-
-Gets metadata for an experiment.
-
-This endpoint will return deleted experiments, but prefers the active experiment if an active and deleted experiment
-share the same name. If multiple deleted experiments share the same name, the API will return one of them.
-
-Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name exists.
-
-### `databricks experiments get-experiment` - Get an experiment.
-
-Gets metadata for an experiment. This method works on deleted experiments.
-
-### `databricks experiments get-history` - Get history of a given metric within a run.
-
-Gets a list of all values for the specified metric for a given run.
-
-Flags:
- * `--max-results` - Maximum number of Metric records to return per paginated request.
- * `--run-id` - ID of the run from which to fetch metric values.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run from which to fetch metric values.
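-
-As a quick sketch of how the experiment commands above compose (the experiment name field and the ID value below are illustrative placeholders, not values taken from this document):
-
-```bash
-# Create an experiment, passing the request body via the documented --json flag.
-databricks experiments create-experiment --json '{"name": "my-experiment"}'
-
-# Create a run inside an existing experiment using the documented --experiment-id flag.
-databricks experiments create-run --experiment-id 1234567890
-```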
-
-### `databricks experiments get-run` - Get a run.
-
-Gets the metadata, metrics, params, and tags for a run.
-In the case where multiple metrics with the same key are logged for a run, return only the value
-with the latest timestamp.
-
-If there are multiple values with the latest timestamp, return the maximum of these values.
-
-Flags:
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run to fetch.
-
-### `databricks experiments list-artifacts` - Get all artifacts.
-
-List artifacts for a run. Takes an optional `artifact_path` prefix. If it is specified, the response contains only artifacts with the specified prefix.
-
-Flags:
- * `--path` - Filter artifacts matching this path (a relative path from the root artifact directory).
- * `--run-id` - ID of the run whose artifacts to list.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run whose artifacts to list.
-
-### `databricks experiments list-experiments` - List experiments.
-
-Gets a list of all experiments.
-
-Flags:
- * `--max-results` - Maximum number of experiments desired.
- * `--view-type` - Qualifier for type of experiments to be returned.
-
-### `databricks experiments log-batch` - Log a batch.
-
-Logs a batch of metrics, params, and tags for a run.
-If any data failed to be persisted, the server will respond with an error (non-200 status code).
-
-In case of error (due to internal server error or an invalid request), partial data may be written.
-
-You can write metrics, params, and tags in interleaving fashion, but writes within a given entity type are guaranteed to follow
-the order specified in the request body.
-
-The overwrite behavior for metrics, params, and tags is as follows:
-
-* Metrics: metric values are never overwritten.
- Logging a metric (key, value, timestamp) appends to the set of values for the metric with the provided key.
-
-* Tags: tag values can be overwritten by successive writes to the same tag key.
- That is, if multiple tag values with the same key are provided in the same API request,
- the last-provided tag value is written. Logging the same tag (key, value) is permitted. Specifically, logging a tag is idempotent.
-
-* Parameters: once written, param values cannot be changed (attempting to overwrite a param value will result in an error).
- However, logging the same param (key, value) is permitted. Specifically, logging a param is idempotent.
-
- Request Limits
- -------------------------------
- A single JSON-serialized API request may be up to 1 MB in size and contain:
-
- * No more than 1000 metrics, params, and tags in total
- * Up to 1000 metrics
- * Up to 100 params
- * Up to 100 tags
-
- For example, a valid request might contain 900 metrics, 50 params, and 50 tags, but logging 900 metrics, 50 params,
- and 51 tags is invalid.
-
- The following limits also apply to metric, param, and tag keys and values:
-
- * Metric keys, param keys, and tag keys can be up to 250 characters in length
- * Parameter and tag values can be up to 250 characters in length
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--run-id` - ID of the run to log under.
-
-### `databricks experiments log-metric` - Log a metric.
-
-Logs a metric for a run. A metric is a key-value pair (string key, float value) with an associated timestamp.
-Examples include the various metrics that represent ML model accuracy. A metric can be logged multiple times.
-
-Flags:
- * `--run-id` - ID of the run under which to log the metric.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the metric.
- * `--step` - Step at which to log the metric.
-
-### `databricks experiments log-model` - Log a model.
-
-**NOTE:** Experimental: command may change or be removed in a future release without warning.
-
-Flags:
- * `--model-json` - MLmodel file in json format.
- * `--run-id` - ID of the run to log under.
-
-### `databricks experiments log-param` - Log a param.
-
-Logs a param used for a run. A param is a key-value pair (string key, string value).
-Examples include hyperparameters used for ML model training and constant dates and values used in an ETL pipeline.
-A param can be logged only once for a run.
-
-Flags:
- * `--run-id` - ID of the run under which to log the param.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the param.
-
-### `databricks experiments restore-experiment` - Restores an experiment.
-
-Restore an experiment marked for deletion. This also restores associated metadata, runs, metrics, params, and tags. If the experiment uses FileStore, underlying artifacts associated with the experiment are also restored. Throws `RESOURCE_DOES_NOT_EXIST` if the experiment was never created or was permanently deleted.
-
-### `databricks experiments restore-run` - Restore a run.
-
-Restores a deleted run.
-
-### `databricks experiments search-experiments` - Search experiments.
-
-Searches for experiments that satisfy specified search criteria.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--filter` - String representing a SQL filter condition (e.g.
- * `--max-results` - Maximum number of experiments desired.
- * `--view-type` - Qualifier for type of experiments to be returned.
-
-### `databricks experiments search-runs` - Search for runs.
-
-Searches for runs that satisfy expressions.
-
-Search expressions can use `mlflowMetric` and `mlflowParam` keys.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--filter` - A filter expression over params, metrics, and tags, that allows returning a subset of runs.
- * `--max-results` - Maximum number of runs desired.
- * `--run-view-type` - Whether to display only active, only deleted, or all runs.
-
-### `databricks experiments set-experiment-tag` - Set a tag.
-
-Sets a tag on an experiment. Experiment tags are metadata that can be updated.
-
-### `databricks experiments set-tag` - Set a tag.
-
-Sets a tag on a run. Tags are run metadata that can be updated during a run and after
-a run completes.
-
-Flags:
- * `--run-id` - ID of the run under which to log the tag.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run under which to log the tag.
-
-### `databricks experiments update-experiment` - Update an experiment.
-
-Updates experiment metadata.
-
-Flags:
- * `--new-name` - If provided, the experiment's name is changed to the new name.
-
-### `databricks experiments update-run` - Update a run.
-
-Updates run metadata.
-
-Flags:
- * `--end-time` - Unix timestamp in milliseconds of when the run ended.
- * `--run-id` - ID of the run to update.
- * `--run-uuid` - [Deprecated, use run_id instead] ID of the run to update.
- * `--status` - Updated status of the run.
-
-## `databricks external-locations` - manage cloud storage path with a storage credential that authorizes access to it.
- -An external location is an object that combines a cloud storage path with a storage -credential that authorizes access to the cloud storage path. Each external location is -subject to Unity Catalog access-control policies that control which users and groups can -access the credential. If a user does not have access to an external location in Unity -Catalog, the request fails and Unity Catalog does not attempt to authenticate to your cloud -tenant on the user’s behalf. - -Databricks recommends using external locations rather than using storage credentials -directly. - -To create external locations, you must be a metastore admin or a user with the -**CREATE_EXTERNAL_LOCATION** privilege. - -### `databricks external-locations create` - Create an external location. - -Creates a new external location entry in the metastore. -The caller must be a metastore admin or have the **CREATE_EXTERNAL_LOCATION** privilege on both the metastore and the associated storage credential. - -Flags: - * `--comment` - User-provided free-form text description. - * `--read-only` - Indicates whether the external location is read-only. - * `--skip-validation` - Skips validation of the storage credential associated with the external location. - -### `databricks external-locations delete` - Delete an external location. - -Deletes the specified external location from the metastore. The caller must be the owner of the external location. - -Flags: - * `--force` - Force deletion even if there are dependent external tables or mounts. - -### `databricks external-locations get` - Get an external location. - -Gets an external location from the metastore. The caller must be either a metastore admin, the owner of the external location, or a user that has some privilege on the external location. - -### `databricks external-locations list` - List external locations. - -Gets an array of external locations (__ExternalLocationInfo__ objects) from the metastore. -The caller must be a metastore admin, the owner of the external location, or a user that has some privilege on the external location. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks external-locations update` - Update an external location. - -Updates an external location in the metastore. The caller must be the owner of the external location, or be a metastore admin. -In the second case, the admin can only update the name of the external location. - -Flags: - * `--comment` - User-provided free-form text description. - * `--credential-name` - Name of the storage credential used with this location. - * `--force` - Force update even if changing url invalidates dependent external tables or mounts. - * `--name` - Name of the external location. - * `--owner` - The owner of the external location. - * `--read-only` - Indicates whether the external location is read-only. - * `--url` - Path URL of the external location. - -## `databricks functions` - Functions implement User-Defined Functions (UDFs) in Unity Catalog. - -The function implementation can be any SQL expression or Query, and it can be invoked wherever a table reference is allowed in a query. -In Unity Catalog, a function resides at the same level as a table, so it can be referenced with the form __catalog_name__.__schema_name__.__function_name__. - -### `databricks functions create` - Create a function. 
-
-Creates a new function.
-
-The user must have the following permissions in order for the function to be created:
-- **USE_CATALOG** on the function's parent catalog
-- **USE_SCHEMA** and **CREATE_FUNCTION** on the function's parent schema
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - User-provided free-form text description.
- * `--external-language` - External function language.
- * `--external-name` - External function name.
- * `--sql-path` - List of schemes whose objects can be referenced without qualification.
-
-### `databricks functions delete` - Delete a function.
-
-Deletes the function that matches the supplied name.
-For the deletion to succeed, the user must satisfy one of the following conditions:
-- Is the owner of the function's parent catalog
-- Is the owner of the function's parent schema and have the **USE_CATALOG** privilege on its parent catalog
-- Is the owner of the function itself and have both the **USE_CATALOG** privilege on its parent catalog and the **USE_SCHEMA** privilege on its parent schema
-
-Flags:
- * `--force` - Force deletion even if the function is not empty.
-
-### `databricks functions get` - Get a function.
-
-Gets a function from within a parent catalog and schema.
-For the fetch to succeed, the user must satisfy one of the following requirements:
-- Is a metastore admin
-- Is an owner of the function's parent catalog
-- Have the **USE_CATALOG** privilege on the function's parent catalog and be the owner of the function
-- Have the **USE_CATALOG** privilege on the function's parent catalog, the **USE_SCHEMA** privilege on the function's parent schema, and the **EXECUTE** privilege on the function itself
-
-### `databricks functions list` - List functions.
-
-List functions within the specified parent catalog and schema.
-If the user is a metastore admin, all functions are returned in the output list.
-Otherwise, the user must have the **USE_CATALOG** privilege on the catalog and the **USE_SCHEMA** privilege on the schema, and the output list contains only functions for which either the user has the **EXECUTE** privilege or the user is the owner.
-There is no guarantee of a specific ordering of the elements in the array.
-
-### `databricks functions update` - Update a function.
-
-Updates the function that matches the supplied name.
-Only the owner of the function can be updated. If the user is not a metastore admin, the user must be a member of the group that is the new function owner.
-The user must satisfy one of the following conditions:
-- Is a metastore admin
-- Is the owner of the function's parent catalog
-- Is the owner of the function's parent schema and has the **USE_CATALOG** privilege on its parent catalog
-- Is the owner of the function itself and has the **USE_CATALOG** privilege on its parent catalog as well as the **USE_SCHEMA** privilege on the function's parent schema.
-
-Flags:
- * `--owner` - Username of current owner of function.
-
-## `databricks git-credentials` - Registers personal access token for Databricks to do operations on behalf of the user.
-
-See [more info](https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html).
-
-### `databricks git-credentials create` - Create a credential entry.
-
-Creates a Git credential entry for the user. Only one Git credential per user is
-supported, so any attempts to create credentials if an entry already exists will
-fail. Use the PATCH endpoint to update existing credentials, or the DELETE endpoint to
-delete existing credentials.
- -Flags: - * `--git-username` - Git username. - * `--personal-access-token` - The personal access token used to authenticate to the corresponding Git provider. - -### `databricks git-credentials delete` - Delete a credential. - -Deletes the specified Git credential. - -### `databricks git-credentials get` - Get a credential entry. - -Gets the Git credential with the specified credential ID. - -### `databricks git-credentials list` - Get Git credentials. - -Lists the calling user's Git credentials. One credential per user is supported. - -### `databricks git-credentials update` - Update a credential. - -Updates the specified Git credential. -Flags: - * `--git-provider` - Git provider. - * `--git-username` - Git username. - * `--personal-access-token` - The personal access token used to authenticate to the corresponding Git provider. - -## `databricks global-init-scripts` - configure global initialization scripts for the workspace. - -The Global Init Scripts API enables Workspace administrators to configure global -initialization scripts for their workspace. These scripts run on every node in every cluster -in the workspace. - -**Important:** Existing clusters must be restarted to pick up any changes made to global -init scripts. -Global init scripts are run in order. If the init script returns with a bad exit code, -the Apache Spark container fails to launch and init scripts with later position are skipped. -If enough containers fail, the entire cluster fails with a `GLOBAL_INIT_SCRIPT_FAILURE` -error code. - -### `databricks global-init-scripts create` - Create init script. - -Creates a new global init script in this workspace. -Flags: - * `--enabled` - Specifies whether the script is enabled. - * `--position` - The position of a global init script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. - -### `databricks global-init-scripts delete` - Delete init script. - -Deletes a global init script. - -### `databricks global-init-scripts get` - Get an init script. - -Gets all the details of a script, including its Base64-encoded contents. - -### `databricks global-init-scripts list` - Get init scripts. - -Get a list of all global init scripts for this workspace. This returns all properties for each script but **not** the script contents. -To retrieve the contents of a script, use the [get a global init script](#operation/get-script) operation. - -### `databricks global-init-scripts update` - Update init script. - -Updates a global init script, specifying only the fields to change. All fields are optional. -Unspecified fields retain their current value. - -Flags: - * `--enabled` - Specifies whether the script is enabled. - * `--position` - The position of a script, where 0 represents the first script to run, 1 is the second script to run, in ascending order. - -## `databricks grants` - Manage data access in Unity Catalog. - -In Unity Catalog, data is secure by default. Initially, users have no access to data in -a metastore. Access can be granted by either a metastore admin, the owner of an object, or -the owner of the catalog or schema that contains the object. Securable objects in Unity -Catalog are hierarchical and privileges are inherited downward. - -Securable objects in Unity Catalog are hierarchical and privileges are inherited downward. -This means that granting a privilege on the catalog automatically grants the privilege to -all current and future objects within the catalog. 
Similarly, privileges granted on a schema -are inherited by all current and future objects within that schema. - -### `databricks grants get` - Get permissions. - -Gets the permissions for a securable. - -Flags: - * `--principal` - If provided, only the permissions for the specified principal (user or group) are returned. - -### `databricks grants get-effective` - Get effective permissions. - -Gets the effective permissions for a securable. -Flags: - * `--principal` - If provided, only the effective permissions for the specified principal (user or group) are returned. - -### `databricks grants update` - Update permissions. - -Updates the permissions for a securable. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks groups` - Groups for identity management. - -Groups simplify identity management, making it easier to assign access to Databricks Workspace, data, -and other securable objects. - -It is best practice to assign access to workspaces and access-control policies in -Unity Catalog to groups, instead of to users individually. All Databricks Workspace identities can be -assigned as members of groups, and members inherit permissions that are assigned to their -group. - -### `databricks groups create` - Create a new group. - -Creates a group in the Databricks Workspace with a unique name, using the supplied group details. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -### `databricks groups delete` - Delete a group. - -Deletes a group from the Databricks Workspace. - -### `databricks groups get` - Get group details. - -Gets the information for a specific group in the Databricks Workspace. - -### `databricks groups list` - List group details. - -Gets all details of the groups associated with the Databricks Workspace. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks groups patch` - Update group details. - -Partially updates the details of a group. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks groups update` - Replace a group. - -Updates the details of a group by replacing the entire group entity. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -## `databricks account groups` - Account-level group management - -Groups simplify identity management, making it easier to assign access to Databricks Account, data, -and other securable objects. - -It is best practice to assign access to workspaces and access-control policies in -Unity Catalog to groups, instead of to users individually. All Databricks Account identities can be -assigned as members of groups, and members inherit permissions that are assigned to their -group. - -### `databricks account groups create` - Create a new group. 
- -Creates a group in the Databricks Account with a unique name, using the supplied group details. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -### `databricks account groups delete` - Delete a group. - -Deletes a group from the Databricks Account. - -### `databricks account groups get` - Get group details. - -Gets the information for a specific group in the Databricks Account. - -### `databricks account groups list` - List group details. - -Gets all details of the groups associated with the Databricks Account. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account groups patch` - Update group details. - -Partially updates the details of a group. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account groups update` - Replace a group. - -Updates the details of a group by replacing the entire group entity. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--display-name` - String that represents a human-readable group name. - * `--external-id` - - * `--id` - Databricks group ID. - -## `databricks instance-pools` - manage ready-to-use cloud instances which reduces a cluster start and auto-scaling times. - -Instance Pools API are used to create, edit, delete and list instance pools by using -ready-to-use cloud instances which reduces a cluster start and auto-scaling times. - -Databricks pools reduce cluster start and auto-scaling times by maintaining a set of idle, -ready-to-use instances. When a cluster is attached to a pool, cluster nodes are created using -the pool’s idle instances. If the pool has no idle instances, the pool expands by allocating -a new instance from the instance provider in order to accommodate the cluster’s request. -When a cluster releases an instance, it returns to the pool and is free for another cluster -to use. Only clusters attached to a pool can use that pool’s idle instances. - -You can specify a different pool for the driver node and worker nodes, or use the same pool -for both. - -Databricks does not charge DBUs while instances are idle in the pool. Instance provider -billing does apply. See pricing. - -### `databricks instance-pools create` - Create a new instance pool. - - -Creates a new instance pool using idle and ready-to-use cloud instances. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. - * `--idle-instance-autotermination-minutes` - Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. 
- * `--max-capacity` - Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. - * `--min-idle-instances` - Minimum number of idle instances to keep in the instance pool. - -### `databricks instance-pools delete` - Delete an instance pool. - -Deletes the instance pool permanently. The idle instances in the pool are terminated asynchronously. - -### `databricks instance-pools edit` - Edit an existing instance pool. - -Modifies the configuration of an existing instance pool. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--enable-elastic-disk` - Autoscaling Local Storage: when enabled, this instances in this pool will dynamically acquire additional disk space when its Spark workers are running low on disk space. - * `--idle-instance-autotermination-minutes` - Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met. - * `--max-capacity` - Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances. - * `--min-idle-instances` - Minimum number of idle instances to keep in the instance pool. - -### `databricks instance-pools get` - Get instance pool information. - -Retrieve the information for an instance pool based on its identifier. - -### `databricks instance-pools list` - List instance pool info. - -Gets a list of instance pools with their statistics. - -## `databricks instance-profiles` - Manage instance profiles that users can launch clusters with. - -The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch -clusters with. Regular users can list the instance profiles available to them. -See [Secure access to S3 buckets](https://docs.databricks.com/administration-guide/cloud-configurations/aws/instance-profiles.html) using -instance profiles for more information. - -### `databricks instance-profiles add` - Register an instance profile. - -In the UI, you can select the instance profile when launching clusters. command is only available to admin users. - -Flags: - * `--iam-role-arn` - The AWS IAM role ARN of the role associated with the instance profile. - * `--is-meta-instance-profile` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - * `--skip-validation` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - -### `databricks instance-profiles edit` - Edit an instance profile. - -The only supported field to change is the optional IAM role ARN associated with -the instance profile. It is required to specify the IAM role ARN if both of -the following are true: - - * Your role name and instance profile name do not match. The name is the part - after the last slash in each ARN. - * You want to use the instance profile with [Databricks SQL Serverless](https://docs.databricks.com/sql/admin/serverless.html). - -To understand where these fields are in the AWS console, see -[Enable serverless SQL warehouses](https://docs.databricks.com/sql/admin/serverless.html). - -command is only available to admin users. - -Flags: - * `--iam-role-arn` - The AWS IAM role ARN of the role associated with the instance profile. 
- * `--is-meta-instance-profile` - By default, Databricks validates that it has sufficient permissions to launch instances with the instance profile. - -### `databricks instance-profiles list` - List available instance profiles. - -List the instance profiles that the calling user can use to launch a cluster. - -command is available to all users. - -### `databricks instance-profiles remove` - Remove the instance profile. - -Remove the instance profile with the provided ARN. -Existing clusters with this instance profile will continue to function. - -command is only accessible to admin users. - -## `databricks ip-access-lists` - enable admins to configure IP access lists. - -IP Access List enables admins to configure IP access lists. - -IP access lists affect web application access and commands access to this workspace only. -If the feature is disabled for a workspace, all access is allowed for this workspace. -There is support for allow lists (inclusion) and block lists (exclusion). - -When a connection is attempted: - 1. **First, all block lists are checked.** If the connection IP address matches any block list, the connection is rejected. - 2. **If the connection was not rejected by block lists**, the IP address is compared with the allow lists. - -If there is at least one allow list for the workspace, the connection is allowed only if the IP address matches an allow list. -If there are no allow lists for the workspace, all IP addresses are allowed. - -For all allow lists and block lists combined, the workspace supports a maximum of 1000 IP/CIDR values, where one CIDR counts as a single value. - -After changes to the IP access list feature, it can take a few minutes for changes to take effect. - -### `databricks ip-access-lists create` - Create access list. - -Creates an IP access list for this workspace. - -A list can be an allow list or a block list. -See the top of this file for a description of how the server treats allow lists and block lists at runtime. - -When creating or updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the new list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. -**Note**: Your new IP access list has no effect until you enable the feature. See :method:workspaceconf/setStatus - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks ip-access-lists delete` - Delete access list. - -Deletes an IP access list, specified by its list ID. - -### `databricks ip-access-lists get` - Get access list. - -Gets an IP access list, specified by its list ID. - -### `databricks ip-access-lists list` - Get access lists. - -Gets all IP access lists for the specified workspace. - -### `databricks ip-access-lists replace` - Replace access list. - -Replaces an IP access list, specified by its ID. - -A list can include allow lists and block lists. See the top -of this file for a description of how the server treats allow lists and block lists at run time. When -replacing an IP access list: - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. 
Attempts to exceed that number return error 400 with `error_code` - value `QUOTA_EXCEEDED`. - * If the resulting list would block the calling user's current IP, error 400 is returned with `error_code` - value `INVALID_STATE`. -It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no -effect until you enable the feature. See :method:workspaceconf/setStatus. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -### `databricks ip-access-lists update` - Update access list. - -Updates an existing IP access list, specified by its ID. - -A list can include allow lists and block lists. -See the top of this file for a description of how the server treats allow lists and block lists at run time. - -When updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the updated list would block the calling user's current IP, error 400 is returned with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. Note that your resulting IP access list has no effect until you enable -the feature. See :method:workspaceconf/setStatus. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -## `databricks account ip-access-lists` - The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console. - -The Accounts IP Access List API enables account admins to configure IP access lists for -access to the account console. - -Account IP Access Lists affect web application access and commands access to the account -console and account APIs. If the feature is disabled for the account, all access is allowed -for this account. There is support for allow lists (inclusion) and block lists (exclusion). - -When a connection is attempted: - 1. **First, all block lists are checked.** If the connection IP address matches any block - list, the connection is rejected. - 2. **If the connection was not rejected by block lists**, the IP address is compared with - the allow lists. - -If there is at least one allow list for the account, the connection is allowed only if the -IP address matches an allow list. If there are no allow lists for the account, all IP -addresses are allowed. - -For all allow lists and block lists combined, the account supports a maximum of 1000 IP/CIDR -values, where one CIDR counts as a single value. - -After changes to the account-level IP access lists, it can take a few minutes for changes -to take effect. - -### `databricks account ip-access-lists create` - Create access list. - -Creates an IP access list for the account. - -A list can be an allow list or a block list. See the top of this file for a description of -how the server treats allow lists and block lists at runtime. - -When creating or updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 - IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number - return error 400 with `error_code` value `QUOTA_EXCEEDED`. 
- * If the new list would block the calling user's current IP, error 400 is returned with - `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account ip-access-lists delete` - Delete access list. - -Deletes an IP access list, specified by its list ID. - -### `databricks account ip-access-lists get` - Get IP access list. - -Gets an IP access list, specified by its list ID. - -### `databricks account ip-access-lists list` - Get access lists. - -Gets all IP access lists for the specified account. - -### `databricks account ip-access-lists replace` - Replace access list. - -Replaces an IP access list, specified by its ID. - -A list can include allow lists and block lists. See the top of this file for a description -of how the server treats allow lists and block lists at run time. When replacing an IP -access list: - * For all allow lists and block lists combined, the API supports a maximum of 1000 IP/CIDR values, - where one CIDR counts as a single value. Attempts to exceed that number return error 400 with `error_code` - value `QUOTA_EXCEEDED`. - * If the resulting list would block the calling user's current IP, error 400 is returned with `error_code` - value `INVALID_STATE`. -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -### `databricks account ip-access-lists update` - Update access list. - -Updates an existing IP access list, specified by its ID. - -A list can include allow lists and block lists. See the top of this file for a description -of how the server treats allow lists and block lists at run time. - -When updating an IP access list: - - * For all allow lists and block lists combined, the API supports a maximum of 1000 - IP/CIDR values, where one CIDR counts as a single value. Attempts to exceed that number - return error 400 with `error_code` value `QUOTA_EXCEEDED`. - * If the updated list would block the calling user's current IP, error 400 is returned - with `error_code` value `INVALID_STATE`. - -It can take a few minutes for the changes to take effect. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--list-id` - Universally unique identifier (UUID) of the IP access list. - -## `databricks jobs` - Manage Databricks Workflows. - -You can use a Databricks job to run a data processing or data analysis task in a Databricks -cluster with scalable resources. Your job can consist of a single task or can be a large, -multi-task workflow with complex dependencies. Databricks manages the task orchestration, -cluster management, monitoring, and error reporting for all of your jobs. You can run your -jobs immediately or periodically through an easy-to-use scheduling system. You can implement -job tasks using notebooks, JARS, Delta Live Tables pipelines, or Python, Scala, Spark -submit, and Java applications. - -You should never hard code secrets or store them in plain text. Use the :service:secrets to manage secrets in the -[Databricks CLI](https://docs.databricks.com/dev-tools/cli/index.html). -Use the [Secrets utility](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-secrets) to reference secrets in notebooks and jobs. - -### `databricks jobs cancel-all-runs` - Cancel all runs of a job. 
- -Cancels all active runs of a job. The runs are canceled asynchronously, so it doesn't -prevent new runs from being started. - -### `databricks jobs cancel-run` - Cancel a job run. - -Cancels a job run. The run is canceled asynchronously, so it may still be running when -this request completes. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - -### `databricks jobs create` - Create a new job. - -Create a new job. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--format` - Used to tell what is the format of the job. - * `--max-concurrent-runs` - An optional maximum allowed number of concurrent runs of the job. - * `--name` - An optional name for the job. - * `--timeout-seconds` - An optional timeout applied to each run of this job. - -### `databricks jobs delete` - Delete a job. - -Deletes a job. - -### `databricks jobs delete-run` - Delete a job run. - -Deletes a non-active run. Returns an error if the run is active. - -### `databricks jobs export-run` - Export and retrieve a job run. - -Export and retrieve the job run task. - -Flags: - * `--views-to-export` - Which views to export (CODE, DASHBOARDS, or ALL). - -### `databricks jobs get` - Get a single job. - -Retrieves the details for a single job. - -### `databricks jobs get-run` - Get a single job run. - -Retrieve the metadata of a run. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--include-history` - Whether to include the repair history in the response. - -### `databricks jobs get-run-output` - Get the output for a single run. - -Retrieve the output and metadata of a single task run. When a notebook task returns -a value through the `dbutils.notebook.exit()` call, you can use this endpoint to retrieve -that value. Databricks restricts command to returning the first 5 MB of the output. -To return a larger result, you can store job results in a cloud storage service. - -This endpoint validates that the __run_id__ parameter is valid and returns an HTTP status -code 400 if the __run_id__ parameter is invalid. Runs are automatically removed after -60 days. If you to want to reference them beyond 60 days, you must save old run results -before they expire. - -### `databricks jobs list` - List all jobs. - -Retrieves a list of jobs. - -Flags: - * `--expand-tasks` - Whether to include task and cluster details in the response. - * `--limit` - The number of jobs to return. - * `--name` - A filter on the list based on the exact (case insensitive) job name. - * `--offset` - The offset of the first job to return, relative to the most recently created job. - -### `databricks jobs list-runs` - List runs for a job. - -List runs in descending order by start time. - -Flags: - * `--active-only` - If active_only is `true`, only active runs are included in the results; otherwise, lists both active and completed runs. - * `--completed-only` - If completed_only is `true`, only completed runs are included in the results; otherwise, lists both active and completed runs. - * `--expand-tasks` - Whether to include task and cluster details in the response. - * `--job-id` - The job for which to list runs. - * `--limit` - The number of runs to return. - * `--offset` - The offset of the first run to return, relative to the most recent run. - * `--run-type` - The type of runs to return. 
- * `--start-time-from` - Show runs that started _at or after_ this value. - * `--start-time-to` - Show runs that started _at or before_ this value. - -### `databricks jobs repair-run` - Repair a job run. - -Re-run one or more tasks. Tasks are re-run as part of the original job run. -They use the current job and task settings, and can be viewed in the history for the -original job run. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--latest-repair-id` - The ID of the latest repair. - * `--rerun-all-failed-tasks` - If true, repair all failed tasks. - -### `databricks jobs reset` - Overwrites all settings for a job. - -Overwrites all the settings for a specific job. Use the Update endpoint to update job settings partially. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks jobs run-now` - Trigger a new job run. - -Run a job and return the `run_id` of the triggered run. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--idempotency-token` - An optional token to guarantee the idempotency of job run requests. - -### `databricks jobs submit` - Create and trigger a one-time run. - -Submit a one-time run. This endpoint allows you to submit a workload directly without -creating a job. Runs submitted using this endpoint don’t display in the UI. Use the -`jobs/runs/get` API to check the run state after the job is submitted. - -Flags: - * `--no-wait` - do not wait to reach TERMINATED or SKIPPED state. - * `--timeout` - maximum amount of time to reach TERMINATED or SKIPPED state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--idempotency-token` - An optional token that can be used to guarantee the idempotency of job run requests. - * `--run-name` - An optional name for the run. - * `--timeout-seconds` - An optional timeout applied to each run of this job. - -### `databricks jobs update` - Partially updates a job. - -Add, update, or remove specific settings of an existing job. Use the ResetJob to overwrite all job settings. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks libraries` - Manage libraries on a cluster. - -The Libraries API allows you to install and uninstall libraries and get the status of -libraries on a cluster. - -To make third-party or custom code available to notebooks and jobs running on your clusters, -you can install a library. Libraries can be written in Python, Java, Scala, and R. You can -upload Java, Scala, and Python libraries and point to external packages in PyPI, Maven, and -CRAN repositories. - -Cluster libraries can be used by all notebooks running on a cluster. You can install a cluster -library directly from a public repository such as PyPI or Maven, using a previously installed -workspace library, or using an init script. - -When you install a library on a cluster, a notebook already attached to that cluster will not -immediately see the new library. You must first detach and then reattach the notebook to -the cluster. - -When you uninstall a library from a cluster, the library is removed only when you restart -the cluster. 
Until you restart the cluster, the status of the uninstalled library appears -as Uninstall pending restart. - -### `databricks libraries all-cluster-statuses` - Get all statuses. - -Get the status of all libraries on all clusters. A status will be available for all libraries installed on this cluster -via the API or the libraries UI as well as libraries set to be installed on all clusters via the libraries UI. - -### `databricks libraries cluster-status` - Get status. - -Get the status of libraries on a cluster. A status will be available for all libraries installed on this cluster via the API -or the libraries UI as well as libraries set to be installed on all clusters via the libraries UI. -The order of returned libraries will be as follows. - -1. Libraries set to be installed on this cluster will be returned first. - Within this group, the final order will be order in which the libraries were added to the cluster. - -2. Libraries set to be installed on all clusters are returned next. - Within this group there is no order guarantee. - -3. Libraries that were previously requested on this cluster or on all clusters, but now marked for removal. - Within this group there is no order guarantee. - -### `databricks libraries install` - Add a library. - -Add libraries to be installed on a cluster. -The installation is asynchronous; it happens in the background after the completion of this request. - -**Note**: The actual set of libraries to be installed on a cluster is the union of the libraries specified via this method and -the libraries set to be installed on all clusters via the libraries UI. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks libraries uninstall` - Uninstall libraries. - -Set libraries to be uninstalled on a cluster. The libraries won't be uninstalled until the cluster is restarted. -Uninstalling libraries that are not installed on the cluster will have no impact but is not an error. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks account log-delivery` - These commands manage log delivery configurations for this account. - -These commands manage log delivery configurations for this account. The two supported log types -for command are _billable usage logs_ and _audit logs_. This feature is in Public Preview. -This feature works with all account ID types. - -Log delivery works with all account types. However, if your account is on the E2 version of -the platform or on a select custom plan that allows multiple workspaces per account, you can -optionally configure different storage destinations for each workspace. Log delivery status -is also provided to know the latest status of log delivery attempts. -The high-level flow of billable usage delivery: - -1. **Create storage**: In AWS, [create a new AWS S3 bucket](https://docs.databricks.com/administration-guide/account-api/aws-storage.html) -with a specific bucket policy. Using Databricks APIs, call the Account API to create a [storage configuration object](#operation/create-storage-config) -that uses the bucket name. -2. **Create credentials**: In AWS, create the appropriate AWS IAM role. For full details, -including the required IAM role policies and trust relationship, see -[Billable usage log delivery](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html). 
-Using Databricks APIs, call the Account API to create a [credential configuration object](#operation/create-credential-config)
-that uses the IAM role's ARN.
-3. **Create log delivery configuration**: Using Databricks APIs, call the Account API to
-[create a log delivery configuration](#operation/create-log-delivery-config) that uses
-the credential and storage configuration objects from previous steps. You can specify if
-the logs should include all events of that log type in your account (_Account level_ delivery)
-or only events for a specific set of workspaces (_workspace level_ delivery). Account level
-log delivery applies to all current and future workspaces plus account level logs, while
-workspace level log delivery solely delivers logs related to the specified workspaces.
-You can create multiple types of delivery configurations per account.
-
-For billable usage delivery:
-* For more information about billable usage logs, see
-[Billable usage log delivery](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html).
-For the CSV schema, see the [Usage page](https://docs.databricks.com/administration-guide/account-settings/usage.html).
-* The delivery location is `<bucket-name>/<delivery-path-prefix>/billable-usage/csv/`, where `<delivery-path-prefix>` is
-the name of the optional delivery path prefix you set up during log delivery configuration.
-Files are named `workspaceId=<workspaceId>-usageMonth=<month>.csv`.
-* All billable usage logs apply to specific workspaces (_workspace level_ logs). You can
-aggregate usage for your entire account by creating an _account level_ delivery
-configuration that delivers logs for all current and future workspaces in your account.
-* The files are delivered daily by overwriting the month's CSV file for each workspace.
-
-For audit log delivery:
-* For more information about audit log delivery, see
-[Audit log delivery](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html),
-which includes information about the JSON schema that is used.
-* The delivery location is `<bucket-name>/<delivery-path-prefix>/workspaceId=<workspaceId>/date=<date>/auditlogs_<internal-id>.json`.
-Files may get overwritten with the same content multiple times to achieve exactly-once delivery.
-* If the audit log delivery configuration included specific workspace IDs, only
-_workspace-level_ audit logs for those workspaces are delivered. If the log delivery
-configuration applies to the entire account (_account level_ delivery configuration),
-the audit log delivery includes workspace-level audit logs for all workspaces in the account
-as well as account-level audit logs. See
-[Audit log delivery](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html) for details.
-* Auditable events are typically available in logs within 15 minutes.
-
-### `databricks account log-delivery create` - Create a new log delivery configuration.
-
-Creates a new Databricks log delivery configuration to enable delivery of the specified type of logs to your storage location. This requires that you already created a [credential object](#operation/create-credential-config) (which encapsulates a cross-account service IAM role) and a [storage configuration object](#operation/create-storage-config) (which encapsulates an S3 bucket).
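-
-For example, a minimal billable usage configuration might look like the following sketch. The field names follow the log delivery request body described above; the configuration name and the two IDs are placeholders for the values returned when you created the credential and storage configuration objects:
-
-```
-databricks account log-delivery create --json '{
-  "log_delivery_configuration": {
-    "config_name": "billable-usage-logs",
-    "log_type": "BILLABLE_USAGE",
-    "output_format": "CSV",
-    "credentials_id": "<credential-configuration-id>",
-    "storage_configuration_id": "<storage-configuration-id>"
-  }
-}'
-```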
-
-For full details, including the required IAM role policies and bucket policies, see [Deliver and access billable usage logs](https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html) or [Configure audit logging](https://docs.databricks.com/administration-guide/account-settings/audit-logs.html).
-
-**Note**: There is a limit on the number of log delivery configurations available per account (each limit applies separately to each log type, including billable usage and audit logs). You can create a maximum of two enabled account-level delivery configurations (configurations without a workspace filter) per type. Additionally, you can create two enabled workspace-level delivery configurations per workspace for each log type, which means that the same workspace ID can occur in the workspace filter for no more than two delivery configurations per log type.
-
-You cannot delete a log delivery configuration, but you can disable it (see [Enable or disable log delivery configuration](#operation/patch-log-delivery-config-status)).
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks account log-delivery get` - Get log delivery configuration.
-
-Gets a Databricks log delivery configuration object for an account, both specified by ID.
-
-### `databricks account log-delivery list` - Get all log delivery configurations.
-
-Gets all Databricks log delivery configurations associated with an account specified by ID.
-
-Flags:
- * `--credentials-id` - Filter by credential configuration ID.
- * `--status` - Filter by status `ENABLED` or `DISABLED`.
- * `--storage-configuration-id` - Filter by storage configuration ID.
-
-### `databricks account log-delivery patch-status` - Enable or disable log delivery configuration.
-
-Enables or disables a log delivery configuration. Deletion of delivery configurations is not supported, so disable log delivery configurations that are no longer needed. Note that you can't re-enable a delivery configuration if this would violate the delivery configuration limits described under [Create log delivery](#operation/create-log-delivery-config).
-
-## `databricks account metastore-assignments` - These commands manage metastore assignments to a workspace.
-
-These commands manage metastore assignments to a workspace.
-
-### `databricks account metastore-assignments create` - Assigns a workspace to a metastore.
-
-Creates an assignment to a metastore for a workspace.
-
-### `databricks account metastore-assignments delete` - Delete a metastore assignment.
-
-Deletes a metastore assignment to a workspace, leaving the workspace with no metastore.
-
-### `databricks account metastore-assignments get` - Gets the metastore assignment for a workspace.
-
-Gets the metastore assignment, if any, for the workspace specified by ID. If the workspace
-is assigned a metastore, the mapping will be returned. If no metastore is assigned to the
-workspace, the assignment will not be found and a 404 is returned.
-
-### `databricks account metastore-assignments list` - Get all workspaces assigned to a metastore.
-
-Gets a list of all Databricks workspace IDs that have been assigned to the given metastore.
-
-### `databricks account metastore-assignments update` - Updates a metastore assignment to a workspace.
-
-Updates an assignment to a metastore for a workspace. Currently, only the default catalog
-may be updated.
-
-Flags:
- * `--default-catalog-name` - The name of the default catalog for the metastore.
- * `--metastore-id` - The unique ID of the metastore. - -## `databricks metastores` - Manage metastores in Unity Catalog. - -A metastore is the top-level container of objects in Unity Catalog. It stores data assets -(tables and views) and the permissions that govern access to them. Databricks account admins -can create metastores and assign them to Databricks workspaces to control which workloads -use each metastore. For a workspace to use Unity Catalog, it must have a Unity Catalog -metastore attached. - -Each metastore is configured with a root storage location in a cloud storage account. -This storage location is used for metadata and managed tables data. - -NOTE: This metastore is distinct from the metastore included in Databricks workspaces -created before Unity Catalog was released. If your workspace includes a legacy Hive -metastore, the data in that metastore is available in a catalog named hive_metastore. - -### `databricks metastores assign` - Create an assignment. - -Creates a new metastore assignment. -If an assignment for the same __workspace_id__ exists, it will be overwritten by the new __metastore_id__ and -__default_catalog_name__. The caller must be an account admin. - -### `databricks metastores create` - Create a metastore. - -Creates a new metastore based on a provided name and storage root path. - -Flags: - * `--region` - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). - -### `databricks metastores current` - Get metastore assignment for workspace. - -Gets the metastore assignment for the workspace being accessed. - -### `databricks metastores delete` - Delete a metastore. - -Deletes a metastore. The caller must be a metastore admin. - -Flags: - * `--force` - Force deletion even if the metastore is not empty. - -### `databricks metastores get` - Get a metastore. - -Gets a metastore that matches the supplied ID. The caller must be a metastore admin to retrieve this info. - -### `databricks metastores list` - List metastores. - -Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks metastores maintenance` - Enables or disables auto maintenance on the metastore. - -Enables or disables auto maintenance on the metastore. - -### `databricks metastores summary` - Get a metastore summary. - -Gets information about a metastore. This summary includes the storage credential, the cloud vendor, the cloud region, and the global metastore ID. - -### `databricks metastores unassign` - Delete an assignment. - -Deletes a metastore assignment. The caller must be an account administrator. - -### `databricks metastores update` - Update a metastore. - -Updates information for a specific metastore. The caller must be a metastore admin. - -Flags: - * `--delta-sharing-organization-name` - The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. - * `--delta-sharing-recipient-token-lifetime-in-seconds` - The lifetime of delta sharing recipient token in seconds. - * `--delta-sharing-scope` - The scope of Delta Sharing enabled for the metastore. - * `--name` - The user-specified name of the metastore. - * `--owner` - The owner of the metastore. - * `--privilege-model-version` - Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). 
- * `--storage-root-credential-id` - UUID of storage credential to access the metastore storage_root. - -### `databricks metastores update-assignment` - Update an assignment. - -Updates a metastore assignment. This operation can be used to update __metastore_id__ or __default_catalog_name__ -for a specified Workspace, if the Workspace is already assigned a metastore. -The caller must be an account admin to update __metastore_id__; otherwise, the caller can be a Workspace admin. - -Flags: - * `--default-catalog-name` - The name of the default catalog for the metastore. - * `--metastore-id` - The unique ID of the metastore. - -## `databricks account metastores` - These commands manage Unity Catalog metastores for an account. - -These commands manage Unity Catalog metastores for an account. A metastore contains catalogs -that can be associated with workspaces - -### `databricks account metastores create` - Create metastore. - -Creates a Unity Catalog metastore. - -Flags: - * `--region` - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). - -### `databricks account metastores delete` - Delete a metastore. - -Deletes a Databricks Unity Catalog metastore for an account, both specified by ID. - -### `databricks account metastores get` - Get a metastore. - -Gets a Databricks Unity Catalog metastore from an account, both specified by ID. - -### `databricks account metastores list` - Get all metastores associated with an account. - -Gets all Unity Catalog metastores associated with an account specified by ID. - -### `databricks account metastores update` - Update a metastore. - -Updates an existing Unity Catalog metastore. - -Flags: - * `--delta-sharing-organization-name` - The organization name of a Delta Sharing entity, to be used in Databricks-to-Databricks Delta Sharing as the official name. - * `--delta-sharing-recipient-token-lifetime-in-seconds` - The lifetime of delta sharing recipient token in seconds. - * `--delta-sharing-scope` - The scope of Delta Sharing enabled for the metastore. - * `--name` - The user-specified name of the metastore. - * `--owner` - The owner of the metastore. - * `--privilege-model-version` - Privilege model version of the metastore, of the form `major.minor` (e.g., `1.0`). - * `--storage-root-credential-id` - UUID of storage credential to access the metastore storage_root. - -## `databricks model-registry` - Expose commands for Model Registry. - -### `databricks model-registry approve-transition-request` - Approve transition request. - -Approves a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry create-comment` - Post a comment. - -Posts a comment on a model version. A comment can be submitted either by a user or programmatically to display -relevant information about the model. For example, test results or deployment errors. - -### `databricks model-registry create-model` - Create a model. - -Creates a new registered model with the name specified in the request body. - -Throws `RESOURCE_ALREADY_EXISTS` if a registered model with the given name exists. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - Optional description for registered model. - -### `databricks model-registry create-model-version` - Create a model version. - -Creates a model version. 
- -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - Optional description for model version. - * `--run-id` - MLflow run ID for correlation, if `source` was generated by an experiment run in MLflow tracking server. - * `--run-link` - MLflow run link - this is the exact link of the run that generated this model version, potentially hosted at another instance of MLflow. - -### `databricks model-registry create-transition-request` - Make a transition request. - -Creates a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry create-webhook` - Create a webhook. - -**NOTE**: This endpoint is in Public Preview. - -Creates a registry webhook. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - User-specified description for the webhook. - * `--model-name` - Name of the model whose events would trigger this webhook. - * `--status` - This describes an enum. - -### `databricks model-registry delete-comment` - Delete a comment. - -Deletes a comment on a model version. - -### `databricks model-registry delete-model` - Delete a model. - -Deletes a registered model. - -### `databricks model-registry delete-model-tag` - Delete a model tag. - -Deletes the tag for a registered model. - -### `databricks model-registry delete-model-version` - Delete a model version. - -Deletes a model version. - -### `databricks model-registry delete-model-version-tag` - Delete a model version tag. - -Deletes a model version tag. - -### `databricks model-registry delete-transition-request` - Delete a ransition request. - -Cancels a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry delete-webhook` - Delete a webhook. - -**NOTE:** This endpoint is in Public Preview. - -Deletes a registry webhook. - -Flags: - * `--id` - Webhook ID required to delete a registry webhook. - -### `databricks model-registry get-latest-versions` - Get the latest version. - -Gets the latest version of a registered model. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks model-registry get-model` - Get model. - -Get the details of a model. This is a Databricks Workspace version of the [MLflow endpoint](https://www.mlflow.org/docs/latest/rest-api.html#get-registeredmodel) -that also returns the model's Databricks Workspace ID and the permission level of the requesting user on the model. - -### `databricks model-registry get-model-version` - Get a model version. - -Get a model version. - -### `databricks model-registry get-model-version-download-uri` - Get a model version URI. - -Gets a URI to download the model version. - -### `databricks model-registry list-models` - List models. - -Lists all available registered models, up to the limit specified in __max_results__. - -Flags: - * `--max-results` - Maximum number of registered models desired. - * `--page-token` - Pagination token to go to the next page based on a previous query. - -### `databricks model-registry list-transition-requests` - List transition requests. - -Gets a list of all open stage transition requests for the model version. - -### `databricks model-registry list-webhooks` - List registry webhooks. - -**NOTE:** This endpoint is in Public Preview. - -Lists all registry webhooks. 
- -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--model-name` - If not specified, all webhooks associated with the specified events are listed, regardless of their associated model. - * `--page-token` - Token indicating the page of artifact results to fetch. - -### `databricks model-registry reject-transition-request` - Reject a transition request. - -Rejects a model version stage transition request. - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry rename-model` - Rename a model. - -Renames a registered model. - -Flags: - * `--new-name` - If provided, updates the name for this `registered_model`. - -### `databricks model-registry search-model-versions` - Searches model versions. - -Searches for specific model versions based on the supplied __filter__. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - String filter condition, like "name='my-model-name'". - * `--max-results` - Maximum number of models desired. - -### `databricks model-registry search-models` - Search models. - -Search for registered models based on the specified __filter__. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - String filter condition, like "name LIKE 'my-model-name'". - * `--max-results` - Maximum number of models desired. - -### `databricks model-registry set-model-tag` - Set a tag. - -Sets a tag on a registered model. - -### `databricks model-registry set-model-version-tag` - Set a version tag. - -Sets a model version tag. - -### `databricks model-registry test-registry-webhook` - Test a webhook. - -**NOTE:** This endpoint is in Public Preview. - -Tests a registry webhook. - -Flags: - * `--event` - If `event` is specified, the test trigger uses the specified event. - -### `databricks model-registry transition-stage` - Transition a stage. - -Transition a model version's stage. This is a Databricks Workspace version of the [MLflow endpoint](https://www.mlflow.org/docs/latest/rest-api.html#transition-modelversion-stage) -that also accepts a comment associated with the transition to be recorded.", - -Flags: - * `--comment` - User-provided comment on the action. - -### `databricks model-registry update-comment` - Update a comment. - -Post an edit to a comment on a model version. - -### `databricks model-registry update-model` - Update model. - -Updates a registered model. - -Flags: - * `--description` - If provided, updates the description for this `registered_model`. - -### `databricks model-registry update-model-version` - Update model version. - -Updates the model version. - -Flags: - * `--description` - If provided, updates the description for this `registered_model`. - -### `databricks model-registry update-webhook` - Update a webhook. - -**NOTE:** This endpoint is in Public Preview. - -Updates a registry webhook. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--description` - User-specified description for the webhook. - * `--status` - This describes an enum. - -## `databricks account networks` - Manage network configurations. - -These commands manage network configurations for customer-managed VPCs (optional). Its ID is used when creating a new workspace if you use customer-managed VPCs. - -### `databricks account networks create` - Create network configuration. - -Creates a Databricks network configuration that represents an VPC and its resources. 
The VPC will be used for new Databricks clusters. This requires a pre-existing VPC and subnets.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--vpc-id` - The ID of the VPC associated with this network.
-
-### `databricks account networks delete` - Delete a network configuration.
-
-Deletes a Databricks network configuration, which represents a cloud VPC and its resources. You cannot delete a network that is associated with a workspace.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-### `databricks account networks get` - Get a network configuration.
-
-Gets a Databricks network configuration, which represents a cloud VPC and its resources.
-
-### `databricks account networks list` - Get all network configurations.
-
-Gets a list of all Databricks network configurations for an account, specified by ID.
-
-This operation is available only if your account is on the E2 version of the platform.
-
-## `databricks account o-auth-enrollment` - These commands enable administrators to enroll OAuth for their accounts, which is required for adding/using any OAuth published/custom application integration.
-
-These commands enable administrators to enroll OAuth for their accounts, which is required for
-adding/using any OAuth published/custom application integration.
-
-**Note:** Your account must be on the E2 version to use these commands because OAuth
-is only supported on the E2 version.
-
-### `databricks account o-auth-enrollment create` - Create OAuth Enrollment request.
-
-Create an OAuth Enrollment request to enroll OAuth for this account and optionally enable
-the OAuth integration for all the partner applications in the account.
-
-The partner applications are:
- - Power BI
- - Tableau Desktop
- - Databricks CLI
-
-The enrollment is executed asynchronously, so the API will return 204 immediately. The
-actual enrollment takes a few minutes; you can check the status via API :method:get.
-
-Flags:
- * `--enable-all-published-apps` - If true, enable OAuth for all the published applications in the account.
-
-### `databricks account o-auth-enrollment get` - Get OAuth enrollment status.
-
-Gets the OAuth enrollment status for this account.
-
-You can only add/use the OAuth published/custom application integrations when the OAuth enrollment
-status is enabled.
-
-## `databricks permissions` - Manage access for various users on different objects and endpoints.
-
-The Permissions API is used to create read, write, edit, update, and manage access for various
-users on different objects and endpoints.
-
-### `databricks permissions get` - Get object permissions.
-
-Gets the permissions of an object. Objects can inherit permissions from their parent objects or root objects.
-
-### `databricks permissions get-permission-levels` - Get permission levels.
-
-Gets the permission levels that a user can have on an object.
-
-### `databricks permissions set` - Set permissions.
-
-Sets permissions on an object. Objects can inherit permissions from their parent objects and root objects.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-### `databricks permissions update` - Update permission.
-
-Updates the permissions on an object.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
-
-## `databricks pipelines` - Manage Delta Live Tables from the command line.
- -The Delta Live Tables API allows you to create, edit, delete, start, and view details about -pipelines. - -Delta Live Tables is a framework for building reliable, maintainable, and testable data -processing pipelines. You define the transformations to perform on your data, and Delta Live -Tables manages task orchestration, cluster management, monitoring, data quality, and error -handling. - -Instead of defining your data pipelines using a series of separate Apache Spark tasks, Delta -Live Tables manages how your data is transformed based on a target schema you define for each -processing step. You can also enforce data quality with Delta Live Tables expectations. -Expectations allow you to define expected data quality and specify how to handle records that -fail those expectations. - -### `databricks pipelines create` - Create a pipeline. - -Creates a new data processing pipeline based on the requested configuration. If successful, this method returns -the ID of the new pipeline. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--allow-duplicate-names` - If false, deployment will fail if name conflicts with that of another pipeline. - * `--catalog` - A catalog in Unity Catalog to publish data from this pipeline to. - * `--channel` - DLT Release Channel that specifies which version to use. - * `--continuous` - Whether the pipeline is continuous or triggered. - * `--development` - Whether the pipeline is in Development mode. - * `--dry-run` - - * `--edition` - Pipeline product edition. - * `--id` - Unique identifier for this pipeline. - * `--name` - Friendly identifier for this pipeline. - * `--photon` - Whether Photon is enabled for this pipeline. - * `--storage` - DBFS root directory for storing checkpoints and tables. - * `--target` - Target schema (database) to add tables in this pipeline to. - -### `databricks pipelines delete` - Delete a pipeline. - -Deletes a pipeline. - -### `databricks pipelines get` - Get a pipeline. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks pipelines get-update` - Get a pipeline update. - -Gets an update from an active pipeline. - -### `databricks pipelines list-pipeline-events` - List pipeline events. - -Retrieves events for a pipeline. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - Criteria to select a subset of results, expressed using a SQL-like syntax. - * `--max-results` - Max number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - -### `databricks pipelines list-pipelines` - List pipelines. - -Lists pipelines defined in the Delta Live Tables system. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--filter` - Select a subset of results based on the specified criteria. - * `--max-results` - The maximum number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - -### `databricks pipelines list-updates` - List pipeline updates. - -List updates for an active pipeline. - -Flags: - * `--max-results` - Max number of entries to return in a single page. - * `--page-token` - Page token returned by previous call. - * `--until-update-id` - If present, returns updates until and including this update_id. - -### `databricks pipelines reset` - Reset a pipeline. - -Resets a pipeline. 
- -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks pipelines start-update` - Queue a pipeline update. - -Starts or queues a pipeline update. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--cause` - - * `--full-refresh` - If true, this update will reset all tables before running. - -### `databricks pipelines stop` - Stop a pipeline. - -Stops a pipeline. - -Flags: - * `--no-wait` - do not wait to reach IDLE state. - * `--timeout` - maximum amount of time to reach IDLE state. - -### `databricks pipelines update` - Edit a pipeline. - -Updates a pipeline with the supplied configuration. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--allow-duplicate-names` - If false, deployment will fail if name has changed and conflicts the name of another pipeline. - * `--catalog` - A catalog in Unity Catalog to publish data from this pipeline to. - * `--channel` - DLT Release Channel that specifies which version to use. - * `--continuous` - Whether the pipeline is continuous or triggered. - * `--development` - Whether the pipeline is in Development mode. - * `--edition` - Pipeline product edition. - * `--expected-last-modified` - If present, the last-modified time of the pipeline settings before the edit. - * `--id` - Unique identifier for this pipeline. - * `--name` - Friendly identifier for this pipeline. - * `--photon` - Whether Photon is enabled for this pipeline. - * `--pipeline-id` - Unique identifier for this pipeline. - * `--storage` - DBFS root directory for storing checkpoints and tables. - * `--target` - Target schema (database) to add tables in this pipeline to. - -## `databricks policy-families` - View available policy families. - -View available policy families. A policy family contains a policy definition providing best -practices for configuring clusters for a particular use case. - -Databricks manages and provides policy families for several common cluster use cases. You -cannot create, edit, or delete policy families. - -Policy families cannot be used directly to create clusters. Instead, you create cluster -policies using a policy family. Cluster policies created using a policy family inherit the -policy family's policy definition. - -### `databricks policy-families get` - get cluster policy family. - -Do it. - -### `databricks policy-families list` - list policy families. - -Flags: - * `--max-results` - The max number of policy families to return. - * `--page-token` - A token that can be used to get the next page of results. - -## `databricks account private-access` - PrivateLink settings. - -These commands manage private access settings for this account. - -### `databricks account private-access create` - Create private access settings. - -Creates a private access settings object, which specifies how your workspace is -accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). To use AWS -PrivateLink, a workspace must have a private access settings object referenced -by ID in the workspace's `private_access_settings_id` property. - -You can share one private access settings with multiple workspaces in a single account. However, -private access settings are specific to AWS regions, so only workspaces in the same -AWS region can use a given private access settings object. 
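-
-For example, a minimal create request might look like the following sketch. The settings name is a placeholder, and the region must match the region of the workspaces that will reference this object:
-
-```
-databricks account private-access create --json '{
-  "private_access_settings_name": "default-private-access",
-  "region": "us-west-2",
-  "public_access_enabled": false
-}'
-```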
- -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--private-access-level` - The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. - * `--public-access-enabled` - Determines if the workspace can be accessed over public internet. - -### `databricks account private-access delete` - Delete a private access settings object. - -Deletes a private access settings object, which determines how your workspace is accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -### `databricks account private-access get` - Get a private access settings object. - -Gets a private access settings object, which specifies how your workspace is accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -### `databricks account private-access list` - Get all private access settings objects. - -Gets a list of all private access settings objects for an account, specified by ID. - -### `databricks account private-access replace` - Replace private access settings. - -Updates an existing private access settings object, which specifies how your workspace is -accessed over [AWS PrivateLink](https://aws.amazon.com/privatelink). To use AWS -PrivateLink, a workspace must have a private access settings object referenced by ID in -the workspace's `private_access_settings_id` property. - -This operation completely overwrites your existing private access settings object attached to your workspaces. -All workspaces attached to the private access settings are affected by any change. -If `public_access_enabled`, `private_access_level`, or `allowed_vpc_endpoint_ids` -are updated, effects of these changes might take several minutes to propagate to the -workspace API. - -You can share one private access settings object with multiple -workspaces in a single account. However, private access settings are specific to -AWS regions, so only workspaces in the same AWS region can use a given private access -settings object. - -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--private-access-level` - The private access level controls which VPC endpoints can connect to the UI or API of any workspace that attaches this private access settings object. - * `--public-access-enabled` - Determines if the workspace can be accessed over public internet. - -## `databricks providers` - Delta Sharing Providers commands. - -Databricks Providers commands - -### `databricks providers create` - Create an auth provider. - -Creates a new authentication provider minimally based on a name and authentication type. -The caller must be an admin on the metastore. - -Flags: - * `--comment` - Description about the provider. 
- * `--recipient-profile-str` - This field is required when the __authentication_type__ is **TOKEN** or not provided. - -### `databricks providers delete` - Delete a provider. - -Deletes an authentication provider, if the caller is a metastore admin or is the owner of the provider. - -### `databricks providers get` - Get a provider. - -Gets a specific authentication provider. The caller must supply the name of the provider, and must either be a metastore admin or the owner of the provider. - -### `databricks providers list` - List providers. - -Gets an array of available authentication providers. -The caller must either be a metastore admin or the owner of the providers. -Providers not owned by the caller are not included in the response. -There is no guarantee of a specific ordering of the elements in the array. - -Flags: - * `--data-provider-global-metastore-id` - If not provided, all providers will be returned. - -### `databricks providers list-shares` - List shares by Provider. - -Gets an array of a specified provider's shares within the metastore where: - - * the caller is a metastore admin, or - * the caller is the owner. - -### `databricks providers update` - Update a provider. - -Updates the information for an authentication provider, if the caller is a metastore admin or is the owner of the provider. -If the update changes the provider name, the caller must be both a metastore admin and the owner of the provider. - -Flags: - * `--comment` - Description about the provider. - * `--name` - The name of the Provider. - * `--owner` - Username of Provider owner. - * `--recipient-profile-str` - This field is required when the __authentication_type__ is **TOKEN** or not provided. - -## `databricks account published-app-integration` - manage published OAuth app integrations like Tableau Cloud for Databricks in AWS cloud. - -These commands enable administrators to manage published oauth app integrations, which is required for -adding/using Published OAuth App Integration like Tableau Cloud for Databricks in AWS cloud. - -**Note:** You can only add/use the OAuth published application integrations when OAuth enrollment -status is enabled. - -### `databricks account published-app-integration create` - Create Published OAuth App Integration. - -Create Published OAuth App Integration. - -You can retrieve the published oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--app-id` - app_id of the oauth published app integration. - -### `databricks account published-app-integration delete` - Delete Published OAuth App Integration. - -Delete an existing Published OAuth App Integration. -You can retrieve the published oauth app integration via :method:get. - -### `databricks account published-app-integration get` - Get OAuth Published App Integration. - -Gets the Published OAuth App Integration for the given integration id. - -### `databricks account published-app-integration list` - Get published oauth app integrations. - -Get the list of published oauth app integrations for the specified Databricks Account - -### `databricks account published-app-integration update` - Updates Published OAuth App Integration. - -Updates an existing published OAuth App Integration. You can retrieve the published oauth app integration via :method:get. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks queries` - These endpoints are used for CRUD operations on query definitions. 
- -These endpoints are used for CRUD operations on query definitions. Query definitions include -the target SQL warehouse, query text, name, description, tags, parameters, and visualizations. - -### `databricks queries create` - Create a new query definition. - -Creates a new query definition. Queries created with this endpoint belong to the authenticated user making the request. - -The `data_source_id` field specifies the ID of the SQL warehouse to run this query against. You can use the Data Sources API to see a complete list of available SQL warehouses. Or you can copy the `data_source_id` from an existing query. - -**Note**: You cannot add a visualization until you create the query. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--data-source-id` - The ID of the data source / SQL warehouse where this query will run. - * `--description` - General description that can convey additional information about this query such as usage notes. - * `--name` - The name or title of this query to display in list views. - * `--parent` - The identifier of the workspace folder containing the query. - * `--query` - The text of the query. - -### `databricks queries delete` - Delete a query. - -Moves a query to the trash. -Trashed queries immediately disappear from searches and list views, and they cannot be used for alerts. -The trash is deleted after 30 days. - -### `databricks queries get` - Get a query definition. - -Retrieve a query object definition along with contextual permissions information about the currently authenticated user. - -### `databricks queries list` - Get a list of queries. - -Gets a list of queries. Optionally, this list can be filtered by a search term. - -Flags: - * `--order` - Name of query attribute to order by. - * `--page` - Page number to retrieve. - * `--page-size` - Number of queries to return per page. - * `--q` - Full text search term. - -### `databricks queries restore` - Restore a query. - -Restore a query that has been moved to the trash. -A restored query appears in list views and searches. You can use restored queries for alerts. - -### `databricks queries update` - Change a query definition. - -Modify this query definition. - -**Note**: You cannot undo this operation. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--data-source-id` - The ID of the data source / SQL warehouse where this query will run. - * `--description` - General description that can convey additional information about this query such as usage notes. - * `--name` - The name or title of this query to display in list views. - * `--query` - The text of the query. - -## `databricks query-history` - Access the history of queries through SQL warehouses. - -Access the history of queries through SQL warehouses. - -### `databricks query-history list` - List Queries. - -List the history of queries through SQL warehouses. You can filter by user ID, warehouse ID, status, and time range. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--include-metrics` - Whether to include metrics about query. - * `--max-results` - Limit the number of results returned in one page. - * `--page-token` - A token that can be used to get the next page of results. - -## `databricks recipient-activation` - Delta Sharing recipient activation commands. - -Databricks Recipient Activation commands - -### `databricks recipient-activation get-activation-url-info` - Get a share activation URL. 
-
-Gets an activation URL for a share.
-
-### `databricks recipient-activation retrieve-token` - Get an access token.
-
-Retrieves an access token with an activation URL. This is a public API without any authentication.
-
-## `databricks recipients` - Delta Sharing recipients.
-
-Databricks Recipients commands
-
-### `databricks recipients create` - Create a share recipient.
-
-Creates a new recipient with the delta sharing authentication type in the metastore.
-The caller must be a metastore admin or have the **CREATE_RECIPIENT** privilege on the metastore.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - Description about the recipient.
- * `--owner` - Username of the recipient owner.
- * `--sharing-code` - The one-time sharing code provided by the data recipient.
-
-### `databricks recipients delete` - Delete a share recipient.
-
-Deletes the specified recipient from the metastore. The caller must be the owner of the recipient.
-
-### `databricks recipients get` - Get a share recipient.
-
-Gets a share recipient from the metastore if:
-
- * the caller is the owner of the share recipient, or
- * the caller is a metastore admin
-
-### `databricks recipients list` - List share recipients.
-
-Gets an array of all share recipients within the current metastore where:
-
- * the caller is a metastore admin, or
- * the caller is the owner.
-
-There is no guarantee of a specific ordering of the elements in the array.
-
-Flags:
- * `--data-recipient-global-metastore-id` - If not provided, all recipients will be returned.
-
-### `databricks recipients rotate-token` - Rotate a token.
-
-Refreshes the specified recipient's delta sharing authentication token with the provided token info.
-The caller must be the owner of the recipient.
-
-### `databricks recipients share-permissions` - Get recipient share permissions.
-
-Gets the share permissions for the specified Recipient. The caller must be a metastore admin or the owner of the Recipient.
-
-### `databricks recipients update` - Update a share recipient.
-
-Updates an existing recipient in the metastore. The caller must be a metastore admin or the owner of the recipient.
-If the recipient name is updated, the caller must be both a metastore admin and the owner of the recipient.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - Description about the recipient.
- * `--name` - Name of Recipient.
- * `--owner` - Username of the recipient owner.
-
-## `databricks repos` - Manage Git repos.
-
-The Repos API allows users to manage their Git repos. Users can use the API to access all
-repos that they have manage permissions on.
-
-Databricks Repos is a visual Git client in Databricks. It supports common Git operations
-such as cloning a repository, committing and pushing, pulling, branch management, and visual
-comparison of diffs when committing.
-
-Within Repos you can develop code in notebooks or other files and follow data science and
-engineering code development best practices using Git for version control, collaboration,
-and CI/CD.
-
-### `databricks repos create` - Create a repo.
-
-Creates a repo in the workspace and links it to the remote Git repo specified.
-Note that repos created programmatically must be linked to a remote Git repo, unlike repos created in the browser.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--path` - Desired path for the repo in the workspace.
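-
-For example, the following sketch creates a repo linked to a GitHub repository; the URL and workspace path are placeholders:
-
-```
-databricks repos create --json '{
-  "url": "https://github.com/<org>/<repository>.git",
-  "provider": "gitHub",
-  "path": "/Repos/someone@example.com/<repository>"
-}'
-```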
-
-### `databricks repos delete` - Delete a repo.
-
-Deletes the specified repo.
-
-### `databricks repos get` - Get a repo.
-
-Returns the repo with the given repo ID.
-
-### `databricks repos list` - Get repos.
-
-Returns repos that the calling user has Manage permissions on. Results are paginated with each page containing twenty repos.
-
-Flags:
- * `--next-page-token` - Token used to get the next page of results.
- * `--path-prefix` - Filters repos that have paths starting with the given path prefix.
-
-### `databricks repos update` - Update a repo.
-
-Updates the repo to a different branch or tag, or updates the repo to the latest commit on the same branch.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--branch` - Branch that the local version of the repo is checked out to.
- * `--tag` - Tag that the local version of the repo is checked out to.
-
-## `databricks schemas` - Manage schemas in Unity Catalog.
-
-A schema (also called a database) is the second layer of Unity Catalog’s three-level
-namespace. A schema organizes tables, views, and functions. To access (or list) a table or view in
-a schema, users must have the USE_SCHEMA data permission on the schema and its parent catalog,
-and they must have the SELECT permission on the table or view.
-
-### `databricks schemas create` - Create a schema.
-
-Creates a new schema for a catalog in the metastore. The caller must be a metastore admin, or have the **CREATE_SCHEMA** privilege in the parent catalog.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - User-provided free-form text description.
- * `--storage-root` - Storage root URL for managed tables within schema.
-
-### `databricks schemas delete` - Delete a schema.
-
-Deletes the specified schema from the parent catalog. The caller must be the owner of the schema or an owner of the parent catalog.
-
-### `databricks schemas get` - Get a schema.
-
-Gets the specified schema within the metastore. The caller must be a metastore admin, the owner of the schema, or a user that has the **USE_SCHEMA** privilege on the schema.
-
-### `databricks schemas list` - List schemas.
-
-Gets an array of schemas for a catalog in the metastore. If the caller is the metastore admin or the owner of the parent catalog, all schemas for the catalog will be retrieved.
-Otherwise, only schemas owned by the caller (or for which the caller has the **USE_SCHEMA** privilege) will be retrieved.
-There is no guarantee of a specific ordering of the elements in the array.
-
-### `databricks schemas update` - Update a schema.
-
-Updates a schema for a catalog. The caller must be the owner of the schema or a metastore admin.
-If the caller is a metastore admin, only the __owner__ field can be changed in the update.
-If the __name__ field must be updated, the caller must be a metastore admin or have the **CREATE_SCHEMA** privilege on the parent catalog.
-
-Flags:
- * `--json` - either inline JSON string or @path/to/file.json with request body
- * `--comment` - User-provided free-form text description.
- * `--name` - Name of schema, relative to parent catalog.
- * `--owner` - Username of current owner of schema.
-
-## `databricks secrets` - Manage secrets, secret scopes, and access permissions.
-
-The Secrets API allows you to manage secrets, secret scopes, and access permissions.
-
-Sometimes accessing data requires that you authenticate to external data sources through JDBC.
-Instead of directly entering your credentials into a notebook, use Databricks secrets to store -your credentials and reference them in notebooks and jobs. - -Administrators, secret creators, and users granted permission can read Databricks secrets. -While Databricks makes an effort to redact secret values that might be displayed in notebooks, -it is not possible to prevent such users from reading secrets. - -### `databricks secrets create-scope` - Create a new secret scope. - -The scope name must consist of alphanumeric characters, dashes, underscores, and periods, -and may not exceed 128 characters. The maximum number of scopes in a workspace is 100. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--initial-manage-principal` - The principal that is initially granted `MANAGE` permission to the created scope. - * `--scope-backend-type` - The backend type the scope will be created with. - -### `databricks secrets delete-acl` - Delete an ACL. - -Deletes the given ACL on the given scope. - -Users must have the `MANAGE` permission to invoke command. -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope, principal, or ACL exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets delete-scope` - Delete a secret scope. - -Deletes a secret scope. - -Throws `RESOURCE_DOES_NOT_EXIST` if the scope does not exist. Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets delete-secret` - Delete a secret. - -Deletes the secret stored in this secret scope. You must have `WRITE` or `MANAGE` permission on the secret scope. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope or secret exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets get-acl` - Get secret ACL details. - -Gets the details about the given ACL, such as the group and permission. -Users must have the `MANAGE` permission to invoke command. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-acls` - Lists ACLs. - -List the ACLs for a given secret scope. Users must have the `MANAGE` permission to invoke command. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-scopes` - List all scopes. - -Lists all secret scopes available in the workspace. - -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets list-secrets` - List secret keys. - -Lists the secret keys that are stored at this scope. -This is a metadata-only operation; secret data cannot be retrieved using command. -Users need the READ permission to make this call. - -The lastUpdatedTimestamp returned is in milliseconds since epoch. -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets put-acl` - Create/update an ACL. - -Creates or overwrites the Access Control List (ACL) associated with the given principal (user or group) on the -specified scope point. 
- -In general, a user or group will use the most powerful permission available to them, -and permissions are ordered as follows: - -* `MANAGE` - Allowed to change ACLs, and read and write to this secret scope. -* `WRITE` - Allowed to read and write to this secret scope. -* `READ` - Allowed to read this secret scope and list what secrets are available. - -Note that in general, secret values can only be read from within a command on a cluster (for example, through a notebook). -There is no API to read the actual secret value material outside of a cluster. -However, the user's permission will be applied based on who is executing the command, and they must have at least READ permission. - -Users must have the `MANAGE` permission to invoke command. - -The principal is a user or group name corresponding to an existing Databricks principal to be granted or revoked access. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `RESOURCE_ALREADY_EXISTS` if a permission for the principal already exists. -Throws `INVALID_PARAMETER_VALUE` if the permission is invalid. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -### `databricks secrets put-secret` - Add a secret. - -Inserts a secret under the provided scope with the given name. -If a secret already exists with the same name, this command overwrites the existing secret's value. -The server encrypts the secret using the secret scope's encryption settings before storing it. - -You must have `WRITE` or `MANAGE` permission on the secret scope. -The secret key must consist of alphanumeric characters, dashes, underscores, and periods, and cannot exceed 128 characters. -The maximum allowed secret value size is 128 KB. The maximum number of secrets in a given scope is 1000. - -The input fields "string_value" or "bytes_value" specify the type of the secret, which will determine the value returned when -the secret value is requested. Exactly one must be specified. - -Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. -Throws `RESOURCE_LIMIT_EXCEEDED` if maximum number of secrets in scope is exceeded. -Throws `INVALID_PARAMETER_VALUE` if the key name or value length is invalid. -Throws `PERMISSION_DENIED` if the user does not have permission to make command call. - -Flags: - * `--bytes-value` - If specified, value will be stored as bytes. - * `--string-value` - If specified, note that the value will be stored in UTF-8 (MB4) form. - -## `databricks service-principals` - Manage service principals. - -Identities for use with jobs, automated tools, and systems such as scripts, apps, and -CI/CD platforms. Databricks recommends creating service principals to run production jobs -or modify production data. If all processes that act on production data run with service -principals, interactive users do not need any write, delete, or modify privileges in -production. This eliminates the risk of a user overwriting production data by accident. - -### `databricks service-principals create` - Create a service principal. - -Creates a new service principal in the Databricks Workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. 
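A minimal sketch of `databricks service-principals create`, using only the flags documented above; the display name and the SCIM-style field names in the JSON variant are illustrative assumptions.

```bash
# Create a workspace-level service principal with a display name.
databricks service-principals create --display-name "ci-deploy-bot"

# Equivalent call driven by an inline JSON request body (field names assumed).
databricks service-principals create --json '{"displayName": "ci-deploy-bot", "active": true}'
```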
- -### `databricks service-principals delete` - Delete a service principal. - -Deletes a single service principal in the Databricks Workspace. - -### `databricks service-principals get` - Get service principal details. - -Gets the details for a single service principal defined in the Databricks Workspace. - -### `databricks service-principals list` - List service principals. - -Gets the set of service principals associated with a Databricks Workspace. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - - -### `databricks service-principals patch` - Update service principal details. - -Partially updates the details of a single service principal in the Databricks Workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks service-principals update` - Replace service principal. - -Updates the details of a single service principal. - -This action replaces the existing service principal with the same name. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -## `databricks account service-principals` - Manage service principals on the account level. - -Identities for use with jobs, automated tools, and systems such as scripts, apps, and -CI/CD platforms. Databricks recommends creating service principals to run production jobs -or modify production data. If all processes that act on production data run with service -principals, interactive users do not need any write, delete, or modify privileges in -production. This eliminates the risk of a user overwriting production data by accident. - -### `databricks account service-principals create` - Create a service principal. - -Creates a new service principal in the Databricks Account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -### `databricks account service-principals delete` - Delete a service principal. - -Deletes a single service principal in the Databricks Account. - -### `databricks account service-principals get` - Get service principal details. - -Gets the details for a single service principal defined in the Databricks Account. - -### `databricks account service-principals list` - List service principals. - -Gets the set of service principals associated with a Databricks Account. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered.
- * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account service-principals patch` - Update service principal details. - -Partially updates the details of a single service principal in the Databricks Account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account service-principals update` - Replace service principal. - -Updates the details of a single service principal. - -This action replaces the existing service principal with the same name. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--application-id` - UUID relating to the service principal. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks service principal ID. - -## `databricks serving-endpoints` - Manage model serving endpoints. - -The Serving Endpoints API allows you to create, update, and delete model serving endpoints. - -You can use a serving endpoint to serve models from the Databricks Model Registry. Endpoints expose -the underlying models as scalable commands endpoints using serverless compute. This means -the endpoints and associated compute resources are fully managed by Databricks and will not appear in -your cloud account. A serving endpoint can consist of one or more MLflow models from the Databricks -Model Registry, called served models. A serving endpoint can have at most ten served models. You can configure -traffic settings to define how requests should be routed to your served models behind an endpoint. -Additionally, you can configure the scale of resources that should be applied to each served model. - -### `databricks serving-endpoints build-logs` - Retrieve the logs associated with building the model's environment for a given serving endpoint's served model. - -Retrieve the logs associated with building the model's environment for a given serving endpoint's served model. - -Retrieves the build logs associated with the provided served model. - -### `databricks serving-endpoints create` - Create a new serving endpoint. - -Flags: - * `--no-wait` - do not wait to reach NOT_UPDATING state. - * `--timeout` - maximum amount of time to reach NOT_UPDATING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks serving-endpoints delete` - Delete a serving endpoint. - -Delete a serving endpoint. - -### `databricks serving-endpoints export-metrics` - Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format. - -Retrieve the metrics corresponding to a serving endpoint for the current time in Prometheus or OpenMetrics exposition format. - -Retrieves the metrics associated with the provided serving endpoint in either Prometheus or OpenMetrics exposition format. - -### `databricks serving-endpoints get` - Get a single serving endpoint. - -Retrieves the details for a single serving endpoint. - -### `databricks serving-endpoints list` - Retrieve all serving endpoints. - -Retrieve all serving endpoints. - -### `databricks serving-endpoints logs` - Retrieve the most recent log lines associated with a given serving endpoint's served model. - -Retrieves the service logs associated with the provided served model. 
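For example, enumerating endpoints and then fetching one by name might look like the sketch below; the endpoint name is a placeholder, not taken from this reference.

```bash
# List all serving endpoints in the workspace, then inspect a single endpoint by name.
databricks serving-endpoints list
databricks serving-endpoints get my-endpoint
```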
- -### `databricks serving-endpoints query` - Query a serving endpoint with provided model input. - -Query a serving endpoint with provided model input. - -### `databricks serving-endpoints update-config` - Update a serving endpoint with a new config. - -Update a serving endpoint with a new config. - -Updates any combination of the serving endpoint's served models, the compute -configuration of those served models, and the endpoint's traffic config. -An endpoint that already has an update in progress can not be updated until -the current update completes or fails. - -Flags: - * `--no-wait` - do not wait to reach NOT_UPDATING state. - * `--timeout` - maximum amount of time to reach NOT_UPDATING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks shares` - Databricks Shares commands. - -Databricks Shares commands - -### `databricks shares create` - Create a share. - -Creates a new share for data objects. Data objects can be added after creation with **update**. -The caller must be a metastore admin or have the **CREATE_SHARE** privilege on the metastore. - -Flags: - * `--comment` - User-provided free-form text description. - -### `databricks shares delete` - Delete a share. - -Deletes a data object share from the metastore. The caller must be an owner of the share. - -### `databricks shares get` - Get a share. - -Gets a data object share from the metastore. The caller must be a metastore admin or the owner of the share. - -Flags: - * `--include-shared-data` - Query for data to include in the share. - -### `databricks shares list` - List shares. - -Gets an array of data object shares from the metastore. The caller must be a metastore admin or the owner of the share. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks shares share-permissions` - Get permissions. - -Gets the permissions for a data share from the metastore. -The caller must be a metastore admin or the owner of the share. - -### `databricks shares update` - Update a share. - -Updates the share with the changes and data objects in the request. -The caller must be the owner of the share or a metastore admin. - -When the caller is a metastore admin, only the __owner__ field can be updated. - -In the case that the share name is changed, **updateShare** requires that the caller is both the share owner and -a metastore admin. - -For each table that is added through this method, the share owner must also have **SELECT** privilege on the table. -This privilege must be maintained indefinitely for recipients to be able to access the table. -Typically, you should use a group as the share owner. - -Table removals through **update** do not require additional privileges. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - User-provided free-form text description. - * `--name` - Name of the share. - * `--owner` - Username of current owner of share. - -### `databricks shares update-permissions` - Update permissions. - -Updates the permissions for a data share in the metastore. -The caller must be a metastore admin or an owner of the share. - -For new recipient grants, the user must also be the owner of the recipients. -recipient revocations do not require additional privileges. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks account storage` - Manage storage configurations for this workspace. 
- -These commands manage storage configurations for this workspace. A root storage S3 bucket in -your account is required to store objects like cluster logs, notebook revisions, and job -results. You can also use the root storage S3 bucket for storage of non-production DBFS -data. A storage configuration encapsulates this bucket information, and its ID is used when -creating a new workspace. - -### `databricks account storage create` - Create new storage configuration. - -Creates a new storage configuration for an account, specified by ID. Uploads a storage configuration object that represents the root AWS S3 bucket in your account. Databricks stores related workspace assets including DBFS, cluster logs, and job results. For the AWS S3 bucket, you need to configure the required bucket policy. - -For information about how to create a new workspace with this command, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account storage delete` - Delete storage configuration. - -Deletes a Databricks storage configuration. You cannot delete a storage configuration that is associated with any workspace. - -### `databricks account storage get` - Get storage configuration. - -Gets a Databricks storage configuration for an account, both specified by ID. - -### `databricks account storage list` - Get all storage configurations. - -Gets a list of all Databricks storage configurations for your account, specified by ID. - -## `databricks storage-credentials` - Manage storage credentials for Unity Catalog. - -A storage credential represents an authentication and authorization mechanism for accessing -data stored on your cloud tenant. Each storage credential is subject to -Unity Catalog access-control policies that control which users and groups can access -the credential. If a user does not have access to a storage credential in Unity Catalog, -the request fails and Unity Catalog does not attempt to authenticate to your cloud tenant -on the user’s behalf. - -Databricks recommends using external locations rather than using storage credentials -directly. - -To create storage credentials, you must be a Databricks account admin. The account admin -who creates the storage credential can delegate ownership to another user or group to -manage permissions on it. - -### `databricks storage-credentials create` - Create a storage credential. - -Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials - * **AzureServicePrincipal** for Azure credentials - * **GcpServiceAccountKey** for GCP credentials. - -The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - Comment associated with the credential. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the created credential. - -### `databricks storage-credentials delete` - Delete a credential. - -Deletes a storage credential from the metastore. The caller must be an owner of the storage credential. - -Flags: - * `--force` - Force deletion even if there are dependent external locations or external tables.
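As an illustration of `databricks storage-credentials create` with an inline request body, the sketch below assumes an AWS IAM role credential and a JSON shape with `name` and `aws_iam_role.role_arn`; the exact schema should be checked against the API reference, and the role ARN is a placeholder.

```bash
# Create an AWS storage credential from an inline JSON body (field names assumed).
databricks storage-credentials create --json '{
  "name": "uc-main-credential",
  "aws_iam_role": {"role_arn": "arn:aws:iam::123456789012:role/unity-catalog-access"}
}'
```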
- -### `databricks storage-credentials get` - Get a credential. - -Gets a storage credential from the metastore. The caller must be a metastore admin, the owner of the storage credential, or have some permission on the storage credential. - -### `databricks storage-credentials list` - List credentials. - -Gets an array of storage credentials (as __StorageCredentialInfo__ objects). -The array is limited to only those storage credentials the caller has permission to access. -If the caller is a metastore admin, all storage credentials will be retrieved. -There is no guarantee of a specific ordering of the elements in the array. - -### `databricks storage-credentials update` - Update a credential. - -Updates a storage credential on the metastore. The caller must be the owner of the storage credential or a metastore admin. If the caller is a metastore admin, only the __owner__ credential can be changed. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--comment` - Comment associated with the credential. - * `--force` - Force update even if there are dependent external locations or external tables. - * `--name` - The credential name. - * `--owner` - Username of current owner of credential. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the updated credential. - -### `databricks storage-credentials validate` - Validate a storage credential. - -Validates a storage credential. At least one of __external_location_name__ and __url__ need to be provided. If only one of them is -provided, it will be used for validation. And if both are provided, the __url__ will be used for -validation, and __external_location_name__ will be ignored when checking overlapping urls. - -Either the __storage_credential_name__ or the cloud-specific credential must be provided. - -The caller must be a metastore admin or the storage credential owner or -have the **CREATE_EXTERNAL_LOCATION** privilege on the metastore and the storage credential. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--external-location-name` - The name of an existing external location to validate. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--url` - The external location url to validate. - -## `databricks account storage-credentials` - These commands manage storage credentials for a particular metastore. - -These commands manage storage credentials for a particular metastore. - -### `databricks account storage-credentials create` - Create a storage credential. - -Creates a new storage credential. The request object is specific to the cloud: - - * **AwsIamRole** for AWS credentials - * **AzureServicePrincipal** for Azure credentials - * **GcpServiceAcountKey** for GCP credentials. - -The caller must be a metastore admin and have the **CREATE_STORAGE_CREDENTIAL** privilege on the metastore. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body. - * `--comment` - Comment associated with the credential. - * `--read-only` - Whether the storage credential is only usable for read operations. - * `--skip-validation` - Supplying true to this argument skips validation of the created credential. - -### `databricks account storage-credentials get` - Gets the named storage credential. - -Gets a storage credential from the metastore. 
The caller must be a metastore admin, the owner of the storage credential, or have a level of privilege on the storage credential. - -### `databricks account storage-credentials list` - Get all storage credentials assigned to a metastore. - -Gets a list of all storage credentials that have been assigned to given metastore. - -## `databricks table-constraints` - Primary key and foreign key constraints encode relationships between fields in tables. - -Primary and foreign keys are informational only and are not enforced. Foreign keys must reference a primary key in another table. -This primary key is the parent constraint of the foreign key and the table this primary key is on is the parent table of the foreign key. -Similarly, the foreign key is the child constraint of its referenced primary key; the table of the foreign key is the child table of the primary key. - -You can declare primary keys and foreign keys as part of the table specification during table creation. -You can also add or drop constraints on existing tables. - -### `databricks table-constraints create` - Create a table constraint. - - -For the table constraint creation to succeed, the user must satisfy both of these conditions: -- the user must have the **USE_CATALOG** privilege on the table's parent catalog, - the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. -- if the new constraint is a __ForeignKeyConstraint__, - the user must have the **USE_CATALOG** privilege on the referenced parent table's catalog, - the **USE_SCHEMA** privilege on the referenced parent table's schema, - and be the owner of the referenced parent table. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks table-constraints delete` - Delete a table constraint. - -Deletes a table constraint. - -For the table constraint deletion to succeed, the user must satisfy both of these conditions: -- the user must have the **USE_CATALOG** privilege on the table's parent catalog, - the **USE_SCHEMA** privilege on the table's parent schema, and be the owner of the table. -- if __cascade__ argument is **true**, the user must have the following permissions on all of the child tables: - the **USE_CATALOG** privilege on the table's catalog, - the **USE_SCHEMA** privilege on the table's schema, - and be the owner of the table. - -## `databricks tables` - A table resides in the third layer of Unity Catalog’s three-level namespace. - -A table resides in the third layer of Unity Catalog’s three-level namespace. It contains -rows of data. To create a table, users must have CREATE_TABLE and USE_SCHEMA permissions on the schema, -and they must have the USE_CATALOG permission on its parent catalog. To query a table, users must -have the SELECT permission on the table, and they must have the USE_CATALOG permission on its -parent catalog and the USE_SCHEMA permission on its parent schema. - -A table can be managed or external. From an API perspective, a __VIEW__ is a particular kind of table (rather than a managed or external table). - -### `databricks tables delete` - Delete a table. - -Deletes a table from the specified parent catalog and schema. -The caller must be the owner of the parent catalog, have the **USE_CATALOG** privilege on the parent catalog and be the owner of the parent schema, -or be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. - -### `databricks tables get` - Get a table. 
- -Gets a table from the metastore for a specific catalog and schema. -The caller must be a metastore admin, be the owner of the table and have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema, -or be the owner of the table and have the **SELECT** privilege on it as well. - -Flags: - * `--include-delta-metadata` - Whether delta metadata should be included in the response. - -### `databricks tables list` - List tables. - -Gets an array of all tables for the current metastore under the parent catalog and schema. -The caller must be a metastore admin or an owner of (or have the **SELECT** privilege on) the table. -For the latter case, the caller must also be the owner or have the **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** privilege on the parent schema. -There is no guarantee of a specific ordering of the elements in the array. - -Flags: - * `--include-delta-metadata` - Whether delta metadata should be included in the response. - * `--max-results` - Maximum number of tables to return (page length). - * `--page-token` - Opaque token to send for the next page of results (pagination). - -### `databricks tables list-summaries` - List table summaries. - -Gets an array of summaries for tables for a schema and catalog within the metastore. The table summaries returned are either: - -* summaries for all tables (within the current metastore and parent catalog and schema), when the user is a metastore admin, or: -* summaries for all tables and schemas (within the current metastore and parent catalog) - for which the user has ownership or the **SELECT** privilege on the table and ownership or **USE_SCHEMA** privilege on the schema, - provided that the user also has ownership or the **USE_CATALOG** privilege on the parent catalog. - -There is no guarantee of a specific ordering of the elements in the array. - -Flags: - * `--max-results` - Maximum number of tables to return (page length). - * `--page-token` - Opaque token to send for the next page of results (pagination). - * `--schema-name-pattern` - A SQL LIKE pattern (% and _) for schema names. - * `--table-name-pattern` - A SQL LIKE pattern (% and _) for table names. - -## `databricks token-management` - Enables administrators to get all tokens and delete tokens for other users. - -Enables administrators to get all tokens and delete tokens for other users. Admins can -either get every token, get a specific token by ID, or get all tokens for a particular user. - -### `databricks token-management create-obo-token` - Create on-behalf token. - -Creates a token on behalf of a service principal. - -Flags: - * `--comment` - Comment that describes the purpose of the token. - -### `databricks token-management delete` - Delete a token. - -Deletes a token, specified by its ID. - -### `databricks token-management get` - Get token info. - -Gets information about a token, specified by its ID. - -### `databricks token-management list` - List all tokens. - -Lists all tokens associated with the specified workspace or user. - -Flags: - * `--created-by-id` - User ID of the user that created the token. - * `--created-by-username` - Username of the user that created the token. - -## `databricks tokens` - The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs. - -The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs.
- -### `databricks tokens create` - Create a user token. - -Creates and returns a token for a user. If this call is made through token authentication, it creates -a token with the same client ID as the authenticated token. If the user's token quota is exceeded, this call -returns an error **QUOTA_EXCEEDED**. - -Flags: - * `--comment` - Optional description to attach to the token. - * `--lifetime-seconds` - The lifetime of the token, in seconds. - -### `databricks tokens delete` - Revoke token. - -Revokes an access token. - -If a token with the specified ID is not valid, this call returns an error **RESOURCE_DOES_NOT_EXIST**. - -### `databricks tokens list` - List tokens. - -Lists all the valid tokens for a user-workspace pair. - -## `databricks users` - Manage users on the workspace-level. - -Databricks recommends using SCIM provisioning to sync users and groups automatically from -your identity provider to your Databricks Workspace. SCIM streamlines onboarding a new -employee or team by using your identity provider to create users and groups in Databricks Workspace -and give them the proper level of access. When a user leaves your organization or no longer -needs access to Databricks Workspace, admins can terminate the user in your identity provider and that -user’s account will also be removed from Databricks Workspace. This ensures a consistent offboarding -process and prevents unauthorized users from accessing sensitive data. - -### `databricks users create` - Create a new user. - -Creates a new user in the Databricks Workspace. This new user will also be added to the Databricks account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks user ID. - * `--user-name` - Email address of the Databricks user. - -### `databricks users delete` - Delete a user. - -Deletes a user. Deleting a user from a Databricks Workspace also removes objects associated with the user. - -### `databricks users get` - Get user details. - -Gets information for a specific user in Databricks Workspace. - -### `databricks users list` - List users. - -Gets details for all the users associated with a Databricks Workspace. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks users patch` - Update user details. - -Partially updates a user resource by applying the supplied operations on specific user attributes. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks users update` - Replace a user. - -Replaces a user's information with the data supplied in request. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks user ID. - * `--user-name` - Email address of the Databricks user. 
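For example, a filtered `databricks users list` call might look like the sketch below; the SCIM filter expression and the attribute names are illustrative assumptions.

```bash
# List users whose user name contains a given domain, returning only selected attributes.
databricks users list --filter 'userName co "@example.com"' --attributes id,userName
```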
- -## `databricks account users` - Manage users on the account level. - -Databricks recommends using SCIM provisioning to sync users and groups automatically from -your identity provider to your Databricks Account. SCIM streamlines onboarding a new -employee or team by using your identity provider to create users and groups in Databricks Account -and give them the proper level of access. When a user leaves your organization or no longer -needs access to Databricks Account, admins can terminate the user in your identity provider and that -user’s account will also be removed from Databricks Account. This ensures a consistent offboarding -process and prevents unauthorized users from accessing sensitive data. - -### `databricks account users create` - Create a new user. - -Creates a new user in the Databricks Account. This new user will also be added to the Databricks account. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks user ID. - * `--user-name` - Email address of the Databricks user. - -### `databricks account users delete` - Delete a user. - -Deleting a user from a Databricks Account also removes objects associated with the user. - -### `databricks account users get` - Get user details. - -Gets information for a specific user in Databricks Account. - -### `databricks account users list` - List users. - -Gets details for all the users associated with a Databricks Account. - -Flags: - * `--attributes` - Comma-separated list of attributes to return in response. - * `--count` - Desired number of results per page. - * `--excluded-attributes` - Comma-separated list of attributes to exclude in response. - * `--filter` - Query by which the results have to be filtered. - * `--sort-by` - Attribute to sort the results. - * `--sort-order` - The order to sort the results. - * `--start-index` - Specifies the index of the first result. - -### `databricks account users patch` - Update user details. - -Partially updates a user resource by applying the supplied operations on specific user attributes. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -### `databricks account users update` - Replace a user. - -Replaces a user's information with the data supplied in request. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--active` - If this user is active. - * `--display-name` - String that represents a concatenation of given and family names. - * `--external-id` - - * `--id` - Databricks user ID. - * `--user-name` - Email address of the Databricks user. - -## `databricks account vpc-endpoints` - Manage VPC endpoints. - -These commands manage VPC endpoint configurations for this account. - -### `databricks account vpc-endpoints create` - Create VPC endpoint configuration. - -Creates a VPC endpoint configuration, which represents a -[VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/vpc-endpoints.html) -object in AWS used to communicate privately with Databricks over -[AWS PrivateLink](https://aws.amazon.com/privatelink). - -After you create the VPC endpoint configuration, the Databricks -[endpoint service](https://docs.aws.amazon.com/vpc/latest/privatelink/privatelink-share-your-services.html) -automatically accepts the VPC endpoint.
- -Before configuring PrivateLink, read the -[Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--aws-vpc-endpoint-id` - The ID of the VPC endpoint object in AWS. - * `--region` - The AWS region in which this VPC endpoint object exists. - -### `databricks account vpc-endpoints delete` - Delete VPC endpoint configuration. - -Deletes a VPC endpoint configuration, which represents an -[AWS VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html) that -can communicate privately with Databricks over [AWS PrivateLink](https://aws.amazon.com/privatelink). - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -### `databricks account vpc-endpoints get` - Get a VPC endpoint configuration. - -Gets a VPC endpoint configuration, which represents a [VPC endpoint](https://docs.aws.amazon.com/vpc/latest/privatelink/concepts.html) object in AWS used to communicate privately with Databricks over -[AWS PrivateLink](https://aws.amazon.com/privatelink). - -### `databricks account vpc-endpoints list` - Get all VPC endpoint configurations. - -Gets a list of all VPC endpoints for an account, specified by ID. - -Before configuring PrivateLink, read the [Databricks article about PrivateLink](https://docs.databricks.com/administration-guide/cloud-configurations/aws/privatelink.html). - -## `databricks warehouses` - Manage Databricks SQL warehouses. - -A SQL warehouse is a compute resource that lets you run SQL commands on data objects within -Databricks SQL. Compute resources are infrastructure resources that provide processing -capabilities in the cloud. - -### `databricks warehouses create` - Create a warehouse. - -Creates a new SQL warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--auto-stop-mins` - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - * `--cluster-size` - Size of the clusters allocated for this warehouse. - * `--creator-name` - Warehouse creator name. - * `--enable-photon` - Configures whether the warehouse should use Photon optimized clusters. - * `--enable-serverless-compute` - Configures whether the warehouse should use serverless compute. - * `--instance-profile-arn` - Deprecated. - * `--max-num-clusters` - Maximum number of clusters that the autoscaler will create to handle concurrent queries. - * `--min-num-clusters` - Minimum number of available clusters that will be maintained for this SQL warehouse. - * `--name` - Logical name for the cluster. - * `--spot-instance-policy` - Configures whether the warehouse should use spot instances. - * `--warehouse-type` - Warehouse type: `PRO` or `CLASSIC`. - -### `databricks warehouses delete` - Delete a warehouse. - -Deletes a SQL warehouse. - -Flags: - * `--no-wait` - do not wait to reach DELETED state. - * `--timeout` - maximum amount of time to reach DELETED state. - -### `databricks warehouses edit` - Update a warehouse. - -Updates the configuration for a SQL warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state.
- * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--auto-stop-mins` - The amount of time in minutes that a SQL warehouse must be idle (i.e., no RUNNING queries) before it is automatically stopped. - * `--cluster-size` - Size of the clusters allocated for this warehouse. - * `--creator-name` - Warehouse creator name. - * `--enable-photon` - Configures whether the warehouse should use Photon optimized clusters. - * `--enable-serverless-compute` - Configures whether the warehouse should use serverless compute. - * `--instance-profile-arn` - Deprecated. - * `--max-num-clusters` - Maximum number of clusters that the autoscaler will create to handle concurrent queries. - * `--min-num-clusters` - Minimum number of available clusters that will be maintained for this SQL warehouse. - * `--name` - Logical name for the cluster. - * `--spot-instance-policy` - Configures whether the warehouse should use spot instances. - * `--warehouse-type` - Warehouse type: `PRO` or `CLASSIC`. - -### `databricks warehouses get` - Get warehouse info. - -Gets the information for a single SQL warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks warehouses get-workspace-warehouse-config` - Get the workspace configuration. - -Gets the workspace level configuration that is shared by all SQL warehouses in a workspace. - -### `databricks warehouses list` - List warehouses. - -Lists all SQL warehouses that a user has manage permissions on. - -Flags: - * `--run-as-user-id` - Service Principal which will be used to fetch the list of warehouses. - -### `databricks warehouses set-workspace-warehouse-config` - Set the workspace configuration. - -Sets the workspace level configuration that is shared by all SQL warehouses in a workspace. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--google-service-account` - GCP only: Google Service Account used to pass to cluster to access Google Cloud Storage. - * `--instance-profile-arn` - AWS Only: Instance profile used to pass IAM role to the cluster. - * `--security-policy` - Security policy for warehouses. - * `--serverless-agreement` - Internal. - -### `databricks warehouses start` - Start a warehouse. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - -### `databricks warehouses stop` - Stop a warehouse. - -Flags: - * `--no-wait` - do not wait to reach STOPPED state. - * `--timeout` - maximum amount of time to reach STOPPED state. - -## `databricks workspace` - The Workspace API allows you to list, import, export, and delete notebooks and folders. - -A notebook is a web-based interface to a document that contains runnable code, visualizations, and explanatory text. - -### `databricks workspace delete` - Delete a workspace object. - -Delete a workspace object. - -Deletes an object or a directory (and optionally recursively deletes all objects in the directory). -* If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. -* If `path` is a non-empty directory and `recursive` is set to `false`, this call returns an error `DIRECTORY_NOT_EMPTY`. - -Object deletion cannot be undone and deleting a directory recursively is not atomic. - -Flags: - * `--recursive` - The flag that specifies whether to delete the object recursively.
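A minimal sketch of `databricks workspace delete`, assuming the object path is passed as a positional argument; the path shown is only an example.

```bash
# Recursively delete a scratch directory; recall that deletion cannot be undone.
databricks workspace delete /Users/someone@example.com/scratch --recursive
```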
- -### `databricks workspace export` - Export a workspace object. - -Exports an object or the contents of an entire directory. - -If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - -One can only export a directory in `DBC` format. If the exported data would exceed the size limit, this call returns `MAX_NOTEBOOK_SIZE_EXCEEDED`. Currently, this command does not support exporting a library. - -Flags: - * `--direct-download` - Flag to enable direct download. - * `--format` - This specifies the format of the exported file. - -### `databricks workspace get-status` - Get status. - -Gets the status of an object or a directory. -If `path` does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - -### `databricks workspace import` - Import a workspace object. - -Imports a workspace object (for example, a notebook or file) or the contents of an entire directory. -If `path` already exists and `overwrite` is set to `false`, this call returns an error `RESOURCE_ALREADY_EXISTS`. -One can only use `DBC` format to import a directory. - -Flags: - * `--content` - The base64-encoded content. - * `--format` - This specifies the format of the file to be imported. - * `--language` - The language of the object. - * `--overwrite` - The flag that specifies whether to overwrite existing object. - -### `databricks workspace list` - List contents. - -Lists the contents of a directory, or the object if it is not a directory. -If the input path does not exist, this call returns an error `RESOURCE_DOES_NOT_EXIST`. - -Flags: - * `--notebooks-modified-after` - ... - -### `databricks workspace mkdirs` - Create a directory. - -Creates the specified directory (and necessary parent directories if they do not exist). -If there is an object (not a directory) at any prefix of the input path, this call returns -an error `RESOURCE_ALREADY_EXISTS`. - -Note that if this operation fails it may have succeeded in creating some of the necessary parent directories. - -## `databricks account workspace-assignment` - The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. - -The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account. - -### `databricks account workspace-assignment delete` - Delete permissions assignment. - -Deletes the workspace permissions assignment in a given account and workspace for the specified principal. - -### `databricks account workspace-assignment get` - List workspace permissions. - -Get an array of workspace permissions for the specified account and workspace. - -### `databricks account workspace-assignment list` - Get permission assignments. - -Get the permission assignments for the specified Databricks Account and Databricks Workspace. - -### `databricks account workspace-assignment update` - Create or update permissions assignment. - -Creates or updates the workspace permissions assignment in a given account and workspace for the specified principal. - -Flags: - * `--json` - either inline JSON string or @path/to/file.json with request body - -## `databricks workspace-conf` - Update known workspace settings for advanced users. - -These commands allow updating known workspace settings for advanced users. - -### `databricks workspace-conf get-status` - Check configuration status. - -Gets the configuration status for a workspace. - -### `databricks workspace-conf set-status` - Enable/disable features.
- -Sets the configuration status for a workspace, including enabling or disabling it. - -## `databricks account workspaces` - These commands manage workspaces for this account. - -These commands manage workspaces for this account. A Databricks workspace is an environment for -accessing all of your Databricks assets. The workspace organizes objects (notebooks, -libraries, and experiments) into folders, and provides access to data and computational -resources such as clusters and jobs. - -These endpoints are available if your account is on the E2 version of the platform or on -a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces create` - Create a new workspace. - -Creates a new workspace. - -**Important**: This operation is asynchronous. A response with HTTP status code 200 means -the request has been accepted and is in progress, but does not mean that the workspace -deployed successfully and is running. The initial workspace status is typically -`PROVISIONING`. Use the workspace ID (`workspace_id`) field in the response to identify -the new workspace and make repeated `GET` requests with the workspace ID and check -its status. The workspace becomes available when the status changes to `RUNNING`. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--json` - either inline JSON string or @path/to/file.json with request body - * `--aws-region` - The AWS region of the workspace's data plane. - * `--cloud` - The cloud provider which the workspace uses. - * `--credentials-id` - ID of the workspace's credential configuration object. - * `--deployment-name` - The deployment name defines part of the subdomain for the workspace. - * `--location` - The Google Cloud region of the workspace data plane in your Google account. - * `--managed-services-customer-managed-key-id` - The ID of the workspace's managed services encryption key configuration object. - * `--network-id` - - * `--pricing-tier` - The pricing tier of the workspace. - * `--private-access-settings-id` - ID of the workspace's private access settings object. - * `--storage-configuration-id` - The ID of the workspace's storage configuration object. - * `--storage-customer-managed-key-id` - The ID of the workspace's storage encryption key configuration object. - -### `databricks account workspaces delete` - Delete a workspace. - -Terminates and deletes a Databricks workspace. From an API perspective, deletion is immediate. However, it might take a few minutes for all workspaces resources to be deleted, depending on the size and number of workspace resources. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces get` - Get a workspace. - -Gets information including status for a Databricks workspace, specified by ID. In the response, the `workspace_status` field indicates the current status. After initial workspace creation (which is asynchronous), make repeated `GET` requests with the workspace ID and check its status. The workspace becomes available when the status changes to `RUNNING`. - -For information about how to create a new workspace with command **including error handling**, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html). 
- -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces list` - Get all workspaces. - -Gets a list of all workspaces associated with an account, specified by ID. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -### `databricks account workspaces update` - Update workspace configuration. - -Updates a workspace configuration for either a running workspace or a failed workspace. The elements that can be updated varies between these two use cases. - -Update a failed workspace: -You can update a Databricks workspace configuration for failed workspace deployment for some fields, but not all fields. For a failed workspace, this request supports updates to the following fields only: -- Credential configuration ID -- Storage configuration ID -- Network configuration ID. Used only to add or change a network configuration for a customer-managed VPC. For a failed workspace only, you can convert a workspace with Databricks-managed VPC to use a customer-managed VPC by adding this ID. You cannot downgrade a workspace with a customer-managed VPC to be a Databricks-managed VPC. You can update the network configuration for a failed or running workspace to add PrivateLink support, though you must also add a private access settings object. -- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). Used only if you use customer-managed keys for managed services. -- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). Used only if you use customer-managed keys for workspace storage. **Important**: If the workspace was ever in the running state, even if briefly before becoming a failed workspace, you cannot add a new key configuration ID for workspace storage. -- Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - -After calling the `PATCH` operation to update the workspace configuration, make repeated `GET` requests with the workspace ID and check the workspace status. The workspace is successful if the status changes to `RUNNING`. - -For information about how to create a new workspace with command **including error handling**, see [Create a new workspace using the Account API](http://docs.databricks.com/administration-guide/account-api/new-workspace.html). - -Update a running workspace: -You can update a Databricks workspace configuration for running workspaces for some fields, but not all fields. For a running workspace, this request supports updating the following fields only: -- Credential configuration ID - -- Network configuration ID. Used only if you already use a customer-managed VPC. You cannot convert a running workspace from a Databricks-managed VPC to a customer-managed VPC. You can use a network configuration update in command for a failed or running workspace to add support for PrivateLink, although you also need to add a private access settings object. - -- Key configuration ID for managed services (control plane storage, such as notebook source and Databricks SQL queries). 
Databricks does not directly encrypt the data with the customer-managed key (CMK). Databricks uses both the CMK and the Databricks managed key (DMK) that is unique to your workspace to encrypt the Data Encryption Key (DEK). Databricks uses the DEK to encrypt your workspace's managed services persisted data. If the workspace does not already have a CMK for managed services, adding this ID enables managed services encryption for new or updated data. Existing managed services data that existed before adding the key remains not encrypted with the DEK until it is modified. If the workspace already has customer-managed keys for managed services, this request rotates (changes) the CMK keys and the DEK is re-encrypted with the DMK and the new CMK. -- Key configuration ID for workspace storage (root S3 bucket and, optionally, EBS volumes). You can set this only if the workspace does not already have a customer-managed key configuration for workspace storage. -- Private access settings ID to add PrivateLink support. You can add or update the private access settings ID to upgrade a workspace to add support for front-end, back-end, or both types of connectivity. You cannot remove (downgrade) any existing front-end or back-end PrivateLink support on a workspace. - -**Important**: To update a running workspace, your workspace must have no running compute resources that run in your workspace's VPC in the Classic data plane. For example, stop all all-purpose clusters, job clusters, pools with running clusters, and Classic SQL warehouses. If you do not terminate all cluster instances in the workspace before calling command, the request will fail. - -**Important**: Customer-managed keys and customer-managed VPCs are supported by only some deployment types and subscription types. If you have questions about availability, contact your Databricks representative. - -This operation is available only if your account is on the E2 version of the platform or on a select custom plan that allows multiple workspaces per account. - -Flags: - * `--no-wait` - do not wait to reach RUNNING state. - * `--timeout` - maximum amount of time to reach RUNNING state. - * `--aws-region` - The AWS region of the workspace's data plane (for example, `us-west-2`). - * `--credentials-id` - ID of the workspace's credential configuration object. - * `--managed-services-customer-managed-key-id` - The ID of the workspace's managed services encryption key configuration object. - * `--network-id` - The ID of the workspace's network configuration object. - * `--storage-configuration-id` - The ID of the workspace's storage configuration object. - * `--storage-customer-managed-key-id` - The ID of the key configuration object for workspace storage. 
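To illustrate `databricks account workspaces update`, the sketch below assumes the workspace ID is passed as a positional argument and uses placeholder values throughout; it swaps the credential configuration and waits for the workspace to reach `RUNNING`.

```bash
# Update the credential configuration on a failed workspace and wait up to 20 minutes.
databricks account workspaces update 1234567890123 \
  --credentials-id <credential-configuration-id> \
  --timeout 20m
```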
diff --git a/go.mod b/go.mod index f33219aa4..5e29d295e 100644 --- a/go.mod +++ b/go.mod @@ -1,53 +1,50 @@ module github.com/databricks/cli -go 1.21 +go 1.22 require ( - github.com/briandowns/spinner v1.23.0 // Apache 2.0 - github.com/databricks/databricks-sdk-go v0.30.1 // Apache 2.0 - github.com/fatih/color v1.16.0 // MIT + github.com/Masterminds/semver/v3 v3.2.1 // MIT + github.com/briandowns/spinner v1.23.1 // Apache 2.0 + github.com/databricks/databricks-sdk-go v0.43.2 // Apache 2.0 + github.com/fatih/color v1.17.0 // MIT github.com/ghodss/yaml v1.0.0 // MIT + NOTICE github.com/google/uuid v1.6.0 // BSD-3-Clause - github.com/hashicorp/go-version v1.6.0 // MPL 2.0 - github.com/hashicorp/hc-install v0.6.2 // MPL 2.0 - github.com/hashicorp/terraform-exec v0.20.0 // MPL 2.0 - github.com/hashicorp/terraform-json v0.21.0 // MPL 2.0 - github.com/imdario/mergo v0.3.15 // BSD-3-Clause + github.com/hashicorp/go-version v1.7.0 // MPL 2.0 + github.com/hashicorp/hc-install v0.7.0 // MPL 2.0 + github.com/hashicorp/terraform-exec v0.21.0 // MPL 2.0 + github.com/hashicorp/terraform-json v0.22.1 // MPL 2.0 github.com/manifoldco/promptui v0.9.0 // BSD-3-Clause github.com/mattn/go-isatty v0.0.20 // MIT github.com/nwidger/jsoncolor v0.3.2 // MIT github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 // BSD-2-Clause github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 // MIT - github.com/spf13/cobra v1.8.0 // Apache 2.0 + github.com/spf13/cobra v1.8.1 // Apache 2.0 github.com/spf13/pflag v1.0.5 // BSD-3-Clause - github.com/stretchr/testify v1.8.4 // MIT - golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/mod v0.14.0 - golang.org/x/oauth2 v0.16.0 - golang.org/x/sync v0.6.0 - golang.org/x/term v0.16.0 - golang.org/x/text v0.14.0 + github.com/stretchr/testify v1.9.0 // MIT + golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 + golang.org/x/mod v0.19.0 + golang.org/x/oauth2 v0.21.0 + golang.org/x/sync v0.7.0 + golang.org/x/term v0.22.0 + golang.org/x/text v0.16.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 -) - -require ( - go.uber.org/mock v0.4.0 gopkg.in/yaml.v3 v3.0.1 ) require ( - cloud.google.com/go/compute v1.23.3 // indirect - cloud.google.com/go/compute/metadata v0.2.3 // indirect - github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 // indirect + cloud.google.com/go/auth v0.4.2 // indirect + cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect + cloud.google.com/go/compute/metadata v0.3.0 // indirect + github.com/ProtonMail/go-crypto v1.1.0-alpha.2 // indirect github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e // indirect github.com/cloudflare/circl v1.3.7 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/felixge/httpsnoop v1.0.4 // indirect - github.com/go-logr/logr v1.3.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect - github.com/golang/protobuf v1.5.3 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-querystring v1.1.0 // indirect github.com/google/s2a-go v0.1.7 // indirect github.com/googleapis/enterprise-certificate-proxy v0.3.2 // indirect @@ -55,21 +52,20 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect - github.com/stretchr/objx v0.5.0 // indirect - github.com/zclconf/go-cty 
v1.14.1 // indirect + github.com/stretchr/objx v0.5.2 // indirect + github.com/zclconf/go-cty v1.14.4 // indirect go.opencensus.io v0.24.0 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 // indirect - go.opentelemetry.io/otel v1.21.0 // indirect - go.opentelemetry.io/otel/metric v1.21.0 // indirect - go.opentelemetry.io/otel/trace v1.21.0 // indirect - golang.org/x/crypto v0.18.0 // indirect - golang.org/x/net v0.20.0 // indirect - golang.org/x/sys v0.16.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect + go.opentelemetry.io/otel v1.24.0 // indirect + go.opentelemetry.io/otel/metric v1.24.0 // indirect + go.opentelemetry.io/otel/trace v1.24.0 // indirect + golang.org/x/crypto v0.23.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/sys v0.22.0 // indirect golang.org/x/time v0.5.0 // indirect - google.golang.org/api v0.154.0 // indirect - google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 // indirect - google.golang.org/grpc v1.59.0 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/api v0.182.0 // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/grpc v1.64.0 // indirect + google.golang.org/protobuf v1.34.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect ) diff --git a/go.sum b/go.sum index 96d043119..8f774a47a 100644 --- a/go.sum +++ b/go.sum @@ -1,20 +1,23 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go/compute v1.23.3 h1:6sVlXXBmbd7jNX0Ipq0trII3e4n1/MsADLK6a+aiVlk= -cloud.google.com/go/compute v1.23.3/go.mod h1:VCgBUoMnIVIR0CscqQiPJLAG25E3ZRZMzcFZeQ+h8CI= -cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= -cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= +cloud.google.com/go/auth v0.4.2 h1:sb0eyLkhRtpq5jA+a8KWw0W70YcdVca7KJ8TM0AFYDg= +cloud.google.com/go/auth v0.4.2/go.mod h1:Kqvlz1cf1sNA0D+sYJnkPQOP+JMHkuHeIgVmCRtZOLc= +cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= +cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= +cloud.google.com/go/compute/metadata v0.3.0 h1:Tz+eQXMEqDIKRsmY3cHTL6FVaynIjX2QxYC4trgAKZc= +cloud.google.com/go/compute/metadata v0.3.0/go.mod h1:zFmK7XCadkQkj6TtorcaGlCW1hT1fIilQDwofLpJ20k= dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk= dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0= +github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= github.com/Microsoft/go-winio v0.6.1 h1:9/kr64B9VUZrLm5YYwbGtUJnMgqWVOdUAXu6Migciow= github.com/Microsoft/go-winio v0.6.1/go.mod h1:LRdKpFKfdobln8UmuiYcKPot9D2v6svN5+sAH+4kjUM= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371 h1:kkhsdkhsCvIsutKu5zLMgWtgh9YxGCNAw8Ad8hjwfYg= -github.com/ProtonMail/go-crypto v0.0.0-20230828082145-3c4c8a2d2371/go.mod h1:EjAoLdwvbIOoOQr3ihjnSoLZRtE8azugULFRteWMNc0= +github.com/ProtonMail/go-crypto v1.1.0-alpha.2 h1:bkyFVUP+ROOARdgCiJzNQo2V2kiB97LyUpzH9P6Hrlg= +github.com/ProtonMail/go-crypto 
v1.1.0-alpha.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE= github.com/apparentlymart/go-textseg/v15 v15.0.0 h1:uYvfpb3DyLSCGWnctWKGj857c6ew1u1fNQOlOtuGxQY= github.com/apparentlymart/go-textseg/v15 v15.0.0/go.mod h1:K8XmNZdhEBkdlyDdvbmmsvpAG721bKi0joRfFdHIWJ4= -github.com/briandowns/spinner v1.23.0 h1:alDF2guRWqa/FOZZYWjlMIx2L6H0wyewPxo/CH4Pt2A= -github.com/briandowns/spinner v1.23.0/go.mod h1:rPG4gmXeN3wQV/TsAY4w8lPdIM6RX3yqeBQJSrbXjuE= -github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0= +github.com/briandowns/spinner v1.23.1 h1:t5fDPmScwUjozhDj4FA46p5acZWIPXYE30qW2Ptu650= +github.com/briandowns/spinner v1.23.1/go.mod h1:LaZeM4wm2Ywy6vO571mvhQNRcWfRUnXOs0RcKV0wYKM= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/chzyer/logex v1.1.10 h1:Swpa1K6QvQznwJRcfTfQJmTE72DqScAa40E+fbHEXEE= github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI= @@ -23,15 +26,14 @@ github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5P github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1 h1:q763qf9huN11kDQavWsoZXJNW3xEE4JJyHa5Q25/sd8= github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/cloudflare/circl v1.3.3/go.mod h1:5XYMA4rFBvNIrhs50XuiBJ15vF2pZn4nnUKZrLbUZFA= github.com/cloudflare/circl v1.3.7 h1:qlCDlTPz2n9fu58M0Nh1J/JzcFpfgkFHHX3O35r5vcU= github.com/cloudflare/circl v1.3.7/go.mod h1:sRTcRWXGLrKw6yIGJ+l7amYJFfAXbZG0kBSc8r4zxgA= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= -github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg= github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4= -github.com/databricks/databricks-sdk-go v0.30.1 h1:ux6I3aHqUH/AOLZEaEHBmwkbHuSAmb+42mTfvh2A7bE= -github.com/databricks/databricks-sdk-go v0.30.1/go.mod h1:QB64wT8EmR9T4ZPqeTRKjfIF4tPZuP9M9kM8Hcr019Q= +github.com/databricks/databricks-sdk-go v0.43.2 h1:4B+sHAYO5kFqwZNQRmsF70eecqsFX6i/0KfXoDFQT/E= +github.com/databricks/databricks-sdk-go v0.43.2/go.mod h1:nlzeOEgJ1Tmb5HyknBJ3GEorCZKWqEBoHprvPmTSNq8= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -42,8 +44,8 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/fatih/color v1.16.0 h1:zmkK9Ngbjj+K0yRhTVONQh1p/HknKYSlNT+vZCzyokM= -github.com/fatih/color v1.16.0/go.mod h1:fL2Sau1YI5c0pdGEVCbKQbLXB6edEj1ZgiY4NijnWvE= +github.com/fatih/color v1.17.0 h1:GlRw1BRJxkpqUCBKzKOw098ed57fEsKeNjpTe3cSjK4= +github.com/fatih/color v1.17.0/go.mod 
h1:YZ7TlrGPkiz6ku9fK3TLD/pl3CpsiFyu8N92HLgmosI= github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg= github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= @@ -52,11 +54,11 @@ github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66D github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376/go.mod h1:an3vInlBmSxCcxctByoQdvwPiA7DTK7jaaFDBTtu0ic= github.com/go-git/go-billy/v5 v5.5.0 h1:yEY4yhzCDuMGSv83oGxiBotRzhwhNr8VZyphhiu+mTU= github.com/go-git/go-billy/v5 v5.5.0/go.mod h1:hmexnoNsr2SJU1Ju67OaNz5ASJY3+sHgFRpCtpDCKow= -github.com/go-git/go-git/v5 v5.10.1 h1:tu8/D8i+TWxgKpzQ3Vc43e+kkhXqtsZCKI/egajKnxk= -github.com/go-git/go-git/v5 v5.10.1/go.mod h1:uEuHjxkHap8kAl//V5F/nNWwqIYtP/402ddd05mp0wg= +github.com/go-git/go-git/v5 v5.12.0 h1:7Md+ndsjrzZxbddRDZjF14qK+NN56sy6wkqaVrjZtys= +github.com/go-git/go-git/v5 v5.12.0/go.mod h1:FTM9VKtnI2m65hNI/TenDDDnUf2Q9FHnXYjuz9i5OEY= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.3.0 h1:2y3SDp0ZXuc6/cjLSZ+Q3ir+QB9T/iG5yYRXqsagWSY= -github.com/go-logr/logr v1.3.0/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= @@ -73,10 +75,8 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0= github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8= github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= +github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek= +github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -84,7 +84,6 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-querystring v1.1.0 
h1:AnCroh3fv4ZBgVIf1Iwtovgjaw/GiKJo8M8yD/fhyJ8= @@ -96,20 +95,18 @@ github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= -github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= -github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/hashicorp/go-cleanhttp v0.5.2 h1:035FKYIWjmULyFRBKPs8TBQoi0x6d9G4xc9neXJWAZQ= github.com/hashicorp/go-cleanhttp v0.5.2/go.mod h1:kO/YDlP8L1346E6Sodw+PrpBSV4/SoxCXGY6BqNFT48= -github.com/hashicorp/go-version v1.6.0 h1:feTTfFNnjP967rlCxM/I9g701jU+RN74YKx2mOkIeek= -github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= -github.com/hashicorp/hc-install v0.6.2 h1:V1k+Vraqz4olgZ9UzKiAcbman9i9scg9GgSt/U3mw/M= -github.com/hashicorp/hc-install v0.6.2/go.mod h1:2JBpd+NCFKiHiu/yYCGaPyPHhZLxXTpz8oreHa/a3Ps= -github.com/hashicorp/terraform-exec v0.20.0 h1:DIZnPsqzPGuUnq6cH8jWcPunBfY+C+M8JyYF3vpnuEo= -github.com/hashicorp/terraform-exec v0.20.0/go.mod h1:ckKGkJWbsNqFKV1itgMnE0hY9IYf1HoiekpuN0eWoDw= -github.com/hashicorp/terraform-json v0.21.0 h1:9NQxbLNqPbEMze+S6+YluEdXgJmhQykRyRNd+zTI05U= -github.com/hashicorp/terraform-json v0.21.0/go.mod h1:qdeBs11ovMzo5puhrRibdD6d2Dq6TyE/28JiU4tIQxk= -github.com/imdario/mergo v0.3.15 h1:M8XP7IuFNsqUx6VPK2P9OSmsYsI/YFaGil0uD21V3dM= -github.com/imdario/mergo v0.3.15/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= +github.com/hashicorp/go-version v1.7.0 h1:5tqGy27NaOTB8yJKUZELlFAS/LTKJkrmONwQKeRZfjY= +github.com/hashicorp/go-version v1.7.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA= +github.com/hashicorp/hc-install v0.7.0 h1:Uu9edVqjKQxxuD28mR5TikkKDd/p55S8vzPC1659aBk= +github.com/hashicorp/hc-install v0.7.0/go.mod h1:ELmmzZlGnEcqoUMKUuykHaPCIR1sYLYX+KSggWSKZuA= +github.com/hashicorp/terraform-exec v0.21.0 h1:uNkLAe95ey5Uux6KJdua6+cv8asgILFVWkd/RG0D2XQ= +github.com/hashicorp/terraform-exec v0.21.0/go.mod h1:1PPeMYou+KDUSSeRE9szMZ/oHf4fYUmB923Wzbq1ICg= +github.com/hashicorp/terraform-json v0.22.1 h1:xft84GZR0QzjPVWs4lRUwvTcPnegqlyS7orfb5Ltvec= +github.com/hashicorp/terraform-json v0.22.1/go.mod h1:JbWSQCLFSXFFhg42T7l9iJwdGXBYV8fmmD6o/ML4p3A= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A= @@ -138,84 +135,69 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= -github.com/sergi/go-diff v1.2.0/go.mod 
h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/skeema/knownhosts v1.2.1 h1:SHWdIUa82uGZz+F+47k8SY4QhhI291cXCpopT1lK2AQ= -github.com/skeema/knownhosts v1.2.1/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= -github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= -github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8= +github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4= +github.com/skeema/knownhosts v1.2.2 h1:Iug2P4fLmDw9f41PB6thxUkNUkJzB5i+1/exaj40L3A= +github.com/skeema/knownhosts v1.2.2/go.mod h1:xYbVRSPxqBZFrdmDyMmsOs+uX1UZC3nTN3ThzgDxUwo= +github.com/spf13/cobra v1.8.1 h1:e5/vxKd/rZsfSJMUX1agtjeTDf+qv1/JdBF8gg5k9ZM= +github.com/spf13/cobra v1.8.1/go.mod h1:wHxEcudfqmLYa8iTfL+OuZPbBZkmvliBWKIezN3kD9Y= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= -github.com/stretchr/objx v0.5.0 h1:1zr/of2m5FGMsad5YfcqgdqdWrIhu+EBEJRhR1U7z/c= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= +github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM= github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw= -github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.14.1 h1:t9fyA35fwjjUMcmL5hLER+e/rEPqrbCK1/OSE4SI9KA= -github.com/zclconf/go-cty v1.14.1/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= +github.com/zclconf/go-cty v1.14.4 h1:uXXczd9QDGsgu0i/QFR/hzI5NYCHLf6NQw/atrbnhq8= +github.com/zclconf/go-cty v1.14.4/go.mod h1:VvMs5i0vgZdhYawQNq5kePSpLAoz8u1xvZgrPIxfnZE= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1 h1:SpGay3w+nEwMpfVnbqOLH5gY52/foP8RE8UzTZ1pdSE= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.46.1/go.mod h1:4UoMYEZOC0yN/sPGH76KPkkU7zgiEWYWL9vwmbnTJPE= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1 h1:aFJWCqJMNjENlcleuuOkGAPH82y0yULBScfXcIEdS24= 
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.46.1/go.mod h1:sEGXWArGqc3tVa+ekntsN65DmVbVeW+7lTKTjZF3/Fo= -go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= -go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= -go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= -go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= -go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= -go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= -go.uber.org/mock v0.4.0 h1:VcM4ZOtdbR4f6VXfiOpwpVJDL6lCReaZ6mw31wqh7KU= -go.uber.org/mock v0.4.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0 h1:4Pp6oUg3+e/6M4C0A/3kJ2VYa++dsWVTtGgLVj5xtHg= +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.49.0/go.mod h1:Mjt1i1INqiaoZOMGR1RIUJN+i3ChKoFRqzrRQhlkbs0= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw= +go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo= +go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo= +go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI= +go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco= +go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI= +go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.3.1-0.20221117191849-2c476679df9a/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= -golang.org/x/crypto v0.7.0/go.mod h1:pYwdfH91IfpZVANVyUOhSIPZaFoJGxTFbZhFTx+dXZU= -golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= -golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d h1:jtJma62tbqLibJ5sFQz8bKtEM8rJBtfilJ2qTU199MI= -golang.org/x/exp v0.0.0-20231006140011-7918f672742d/go.mod h1:ldy0pHrwJyGW56pPQzzkH36rKxoZW1tw7ZJpeKx+hdo= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 h1:LfspQV/FYTatPTr/3HzIcmiUFH7PGP+OQ6mgDYo3yuQ= +golang.org/x/exp v0.0.0-20240222234643-814bf88cf225/go.mod h1:CxmFvTBINI24O/j8iY7H1xHzx2i4OsyguNBmN/uPtqc= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod 
h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= -golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= -golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0= -golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= +golang.org/x/mod v0.19.0 h1:fEdghXQSo20giMthA7cd28ZC+jts4amQ3YMXiP5oMQ8= +golang.org/x/mod v0.19.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= -golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= -golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= -golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.8.0/go.mod h1:QVkue5JL9kW//ek3r6jTKnTFis1tRmNAW2P1shuFdJc= -golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= -golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= -golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ= -golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= +golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M= +golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -223,34 +205,17 @@ golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616045830-e2b7044e8c71/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU= -golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= -golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= -golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= -golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.6.0/go.mod h1:m6U89DPEgQRMq3DNkDClhWw02AUbt2daBVO4cn4Hv9U= -golang.org/x/term v0.16.0 h1:m+B6fahuftsE9qjo0VWp2FW0mB3MTJvR0BaMQrq0pmE= -golang.org/x/term v0.16.0/go.mod h1:yn7UURbUtPyrVJPGPq404EukNFxcm/foM+bV/bfcDsY= +golang.org/x/sys v0.22.0 h1:RI27ohtqKCnwULzJLqkv897zojh5/DwS/ENaMzUOaWI= +golang.org/x/sys v0.22.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/term v0.22.0 h1:BbsgPEJULsl2fV/AT3v15Mjva5yXKQDyKf+TbDz7QJk= +golang.org/x/term v0.22.0/go.mod h1:F3qCibpT5AMpCRfhfT53vVJwhLtIVHhB9XDjfFvnMI4= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= -golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.8.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.16.0 h1:a94ExnEXNtEwYLGJSIUxnWoxoRz/ZcCsV63ROupILh4= +golang.org/x/text v0.16.0/go.mod h1:GhwF1Be+LQoKShO3cGOHzqOgRrGaYc9AvblQOmPVHnI= golang.org/x/time v0.5.0 h1:o7cqy6amK/52YcAKIPlM3a+Fpj35zvRj2TP+e1xFSfk= golang.org/x/time v0.5.0/go.mod 
h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= @@ -258,31 +223,25 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= -golang.org/x/tools v0.14.0 h1:jvNa2pY0M4r62jkRQ6RwEZZyPcymeL9XZMLBbV7U2nc= -golang.org/x/tools v0.14.0/go.mod h1:uYBEerGOWcJyEORxN+Ek8+TT266gXkNlHdJBwexUsBg= -golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d h1:vU5i/LfpvrRCpgM/VPfJLg5KjxD3E+hfT1SH+d9zLwg= +golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/api v0.154.0 h1:X7QkVKZBskztmpPKWQXgjJRPA2dJYrL6r+sYPRLj050= -google.golang.org/api v0.154.0/go.mod h1:qhSMkM85hgqiokIYsrRyKxrjfBeIhgl4Z2JmeRkYylc= +google.golang.org/api v0.182.0 h1:if5fPvudRQ78GeRx3RayIoiuV7modtErPIZC/T2bIvE= +google.golang.org/api v0.182.0/go.mod h1:cGhjy4caqA5yXRzEhkHI8Y9mfyC2VLTlER2l08xaqtM= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM= -google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4 h1:DC7wcm+i+P1rN3Ff07vL+OndGg5OhNddHyTA+ocPqYE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20231127180814-3a041ad873d4/go.mod h1:eJVxU6o+4G1PSczBr85xmyvSNYAKvAYgkub40YGomFM= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e h1:Elxv5MwEkCI9f5SkoL6afed6NTdxaGoAo39eANBwHL8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= -google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= -google.golang.org/grpc 
v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -292,10 +251,8 @@ google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg= +google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/ini.v1 v1.67.0 h1:Dgnx+6+nfE+IfzjUEISNeydPJh9AXNNsWbGP9KzCsOA= diff --git a/internal/auth_describe_test.go b/internal/auth_describe_test.go new file mode 100644 index 000000000..90b5d6801 --- /dev/null +++ b/internal/auth_describe_test.go @@ -0,0 +1,49 @@ +package internal + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/require" +) + +func TestAuthDescribeSuccess(t *testing.T) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + stdout, _ := RequireSuccessfulRun(t, "auth", "describe") + outStr := stdout.String() + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + require.NotEmpty(t, outStr) + require.Contains(t, outStr, fmt.Sprintf("Host: %s", w.Config.Host)) + + me, err := w.CurrentUser.Me(context.Background()) + require.NoError(t, err) + require.Contains(t, outStr, fmt.Sprintf("User: %s", me.UserName)) + require.Contains(t, outStr, fmt.Sprintf("Authenticated with: %s", w.Config.AuthType)) + require.Contains(t, outStr, "Current configuration:") + require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + require.Contains(t, outStr, "✓ profile: default") +} + +func TestAuthDescribeFailure(t *testing.T) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + stdout, _ := RequireSuccessfulRun(t, "auth", "describe", "--profile", "nonexistent") + outStr := stdout.String() + + require.NotEmpty(t, outStr) + require.Contains(t, outStr, "Unable to authenticate: resolve") + require.Contains(t, outStr, "has no nonexistent profile configured") + require.Contains(t, outStr, "Current configuration:") + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host)) + 
require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)") +} diff --git a/internal/build/variables.go b/internal/build/variables.go index 096657c6e..197dee9c3 100644 --- a/internal/build/variables.go +++ b/internal/build/variables.go @@ -16,3 +16,9 @@ var buildPatch string = "0" var buildPrerelease string = "" var buildIsSnapshot string = "false" var buildTimestamp string = "0" + +// This function is used to set the build version for testing purposes. +func SetBuildVersion(version string) { + buildVersion = version + info.Version = version +} diff --git a/internal/bundle/artifacts_test.go b/internal/bundle/artifacts_test.go index 71f91fded..46c236a4e 100644 --- a/internal/bundle/artifacts_test.go +++ b/internal/bundle/artifacts_test.go @@ -1,7 +1,6 @@ package bundle import ( - "context" "os" "path" "path/filepath" @@ -11,9 +10,11 @@ import ( "github.com/databricks/cli/bundle" "github.com/databricks/cli/bundle/artifacts" "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/bundle/config/resources" "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" "github.com/databricks/databricks-sdk-go/service/compute" + "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/stretchr/testify/require" ) @@ -26,30 +27,17 @@ func touchEmptyFile(t *testing.T, path string) { } func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { - t.Log(internal.GetEnvOrSkipTest(t, "CLOUD_ENV")) - + ctx, wt := acc.WorkspaceTest(t) + w := wt.W dir := t.TempDir() whlPath := filepath.Join(dir, "dist", "test.whl") touchEmptyFile(t, whlPath) - artifact := &config.Artifact{ - Type: "whl", - Files: []config.ArtifactFile{ - { - Source: whlPath, - Libraries: []*compute.Library{ - {Whl: "dist\\test.whl"}, - }, - }, - }, - } - - w := databricks.Must(databricks.NewWorkspaceClient()) wsDir := internal.TemporaryWorkspaceDir(t, w) b := &bundle.Bundle{ + RootPath: dir, Config: config.Root{ - Path: dir, Bundle: config.Bundle{ Target: "whatever", }, @@ -57,13 +45,180 @@ func TestAccUploadArtifactFileToCorrectRemotePath(t *testing.T) { ArtifactPath: wsDir, }, Artifacts: config.Artifacts{ - "test": artifact, + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, }, }, } - err := bundle.Apply(context.Background(), b, artifacts.BasicUpload("test")) - require.NoError(t, err) - require.Regexp(t, regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].RemotePath) - require.Regexp(t, regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), artifact.Files[0].Libraries[0].Whl) + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) + + // The remote path attribute on the artifact file should have been set. + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) + + // The task library path should have been updated to the remote path. 
+ require.Regexp(t, + regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, + ) +} + +func TestAccUploadArtifactFileToCorrectRemotePathWithEnvironments(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + + wsDir := internal.TemporaryWorkspaceDir(t, w) + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactPath: wsDir, + }, + Artifacts: config.Artifacts{ + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Environments: []jobs.JobEnvironment{ + { + Spec: &compute.Environment{ + Dependencies: []string{ + "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) + + // The remote path attribute on the artifact file should have been set. + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) + + // The job environment deps path should have been updated to the remote path. + require.Regexp(t, + regexp.MustCompile(path.Join("/Workspace", regexp.QuoteMeta(wsDir), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Environments[0].Spec.Dependencies[0], + ) +} + +func TestAccUploadArtifactFileToCorrectRemotePathForVolumes(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + volumePath := internal.TemporaryUcVolume(t, w) + + dir := t.TempDir() + whlPath := filepath.Join(dir, "dist", "test.whl") + touchEmptyFile(t, whlPath) + + b := &bundle.Bundle{ + RootPath: dir, + Config: config.Root{ + Bundle: config.Bundle{ + Target: "whatever", + }, + Workspace: config.Workspace{ + ArtifactPath: volumePath, + }, + Artifacts: config.Artifacts{ + "test": &config.Artifact{ + Type: "whl", + Files: []config.ArtifactFile{ + { + Source: whlPath, + }, + }, + }, + }, + Resources: config.Resources{ + Jobs: map[string]*resources.Job{ + "test": { + JobSettings: &jobs.JobSettings{ + Tasks: []jobs.Task{ + { + Libraries: []compute.Library{ + { + Whl: "dist/test.whl", + }, + }, + }, + }, + }, + }, + }, + }, + }, + } + + diags := bundle.Apply(ctx, b, artifacts.BasicUpload("test")) + require.NoError(t, diags.Error()) + + // The remote path attribute on the artifact file should have been set. + require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + b.Config.Artifacts["test"].Files[0].RemotePath, + ) + + // The task library path should have been updated to the remote path. 
+ require.Regexp(t, + regexp.MustCompile(path.Join(regexp.QuoteMeta(volumePath), `.internal/test\.whl`)), + b.Config.Resources.Jobs["test"].JobSettings.Tasks[0].Libraries[0].Whl, + ) } diff --git a/internal/bundle/basic_test.go b/internal/bundle/basic_test.go new file mode 100644 index 000000000..c24ef0c05 --- /dev/null +++ b/internal/bundle/basic_test.go @@ -0,0 +1,43 @@ +package bundle + +import ( + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccBasicBundleDeployWithFailOnActiveRuns(t *testing.T) { + ctx, _ := acc.WorkspaceTest(t) + + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + uniqueId := uuid.New().String() + root, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + }) + require.NoError(t, err) + + t.Cleanup(func() { + err = destroyBundle(t, ctx, root) + require.NoError(t, err) + }) + + // deploy empty bundle + err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(root, ".databricks")) + require.NoError(t, err) + + // deploy empty bundle again + err = deployBundleWithFlags(t, ctx, root, []string{"--fail-on-active-runs"}) + require.NoError(t, err) +} diff --git a/internal/bundle/bind_resource_test.go b/internal/bundle/bind_resource_test.go new file mode 100644 index 000000000..d44ad2c31 --- /dev/null +++ b/internal/bundle/bind_resource_test.go @@ -0,0 +1,185 @@ +package bundle + +import ( + "fmt" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/jobs" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccBindJobToExistingJob(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + jobId := gt.createTestJob(ctx) + t.Cleanup(func() { + gt.destroyJob(ctx, jobId) + require.NoError(t, err) + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve") + _, _, err = c.Run() + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // Check that job is bound and updated with config from bundle + job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") + + c = internal.NewCobraTestRunner(t, "bundle", 
"deployment", "unbind", "foo") + _, _, err = c.Run() + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + err = destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Check that job is unbound and exists after bundle is destroyed + job, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py") +} + +func TestAccAbortBind(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + jobId := gt.createTestJob(ctx) + t.Cleanup(func() { + gt.destroyJob(ctx, jobId) + destroyBundle(t, ctx, bundleRoot) + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId)) + + // Simulate user aborting the bind. This is done by not providing any input to the prompt in non-interactive mode. + _, _, err = c.Run() + require.ErrorContains(t, err, "failed to bind the resource") + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // Check that job is not bound and not updated with config from bundle + job, err := w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.NoError(t, err) + + require.NotEqual(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId)) + require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test") +} + +func TestAccGenerateAndBind(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} + + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ + "unique_id": uniqueId, + }) + require.NoError(t, err) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + jobId := gt.createTestJob(ctx) + t.Cleanup(func() { + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + if err == nil { + gt.destroyJob(ctx, jobId) + } + }) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", + "--key", "test_job_key", + "--existing-job-id", fmt.Sprint(jobId), + "--config-dir", filepath.Join(bundleRoot, "resources"), + "--source-dir", filepath.Join(bundleRoot, "src")) + _, _, err = c.Run() + require.NoError(t, err) + + _, err = os.Stat(filepath.Join(bundleRoot, "src", "test.py")) + require.NoError(t, err) + + matches, err := filepath.Glob(filepath.Join(bundleRoot, "resources", "test_job_key.yml")) + require.NoError(t, err) + + require.Len(t, matches, 1) + + c = internal.NewCobraTestRunner(t, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve") + _, _, err = c.Run() + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + err = destroyBundle(t, ctx, bundleRoot) + 
require.NoError(t, err) + + // Check that the job is bound and does not exist after the bundle is destroyed + _, err = w.Jobs.Get(ctx, jobs.GetJobRequest{ + JobId: jobId, + }) + require.ErrorContains(t, err, "does not exist.") +} diff --git a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json index 8fca7a7c4..f03ad1c2b 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json +++ b/internal/bundle/bundles/deploy_then_remove_resources/databricks_template_schema.json @@ -3,6 +3,14 @@ "unique_id": { "type": "string", "description": "Unique ID for pipeline name" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" } } } diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py b/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py new file mode 100644 index 000000000..4914a7436 --- /dev/null +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/bar.py @@ -0,0 +1,2 @@ +# Databricks notebook source +print("hello") diff --git a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl index b74344e4c..f3be9aafd 100644 --- a/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl +++ b/internal/bundle/bundles/deploy_then_remove_resources/template/resources.yml.tmpl @@ -1,7 +1,18 @@ resources: + jobs: + foo: + name: test-bundle-job-{{.unique_id}} + tasks: + - task_key: my_notebook_task + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + notebook_task: + notebook_path: "./bar.py" pipelines: bar: name: test-bundle-pipeline-{{.unique_id}} libraries: - - notebook: - path: "./foo.py" + - file: + path: "./foo.py" diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json b/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json new file mode 100644 index 000000000..ae765c58f --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/databricks_template_schema.json @@ -0,0 +1,13 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_test_code", + "description": "Unique name for this project" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + } + } +} diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl b/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl new file mode 100644 index 000000000..4a674dce0 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/databricks.yml.tmpl @@ -0,0 +1,25 @@ +bundle: + name: wheel-task-with-environments + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + +resources: + jobs: + some_other_job: + name: "[${bundle.target}] Test Wheel Job With Environments {{.unique_id}}" + tasks: + - task_key: TestTask + python_wheel_task: + package_name: my_test_code + entry_point: run + parameters: + - "one" + - "two" + environment_key: "test" + environments: + - environment_key: "test" + spec: + client: "1" + dependencies: + - ./dist/*.whl diff --git
a/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl b/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl new file mode 100644 index 000000000..b528657b1 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/setup.py.tmpl @@ -0,0 +1,15 @@ +from setuptools import setup, find_packages + +import {{.project_name}} + +setup( + name="{{.project_name}}", + version={{.project_name}}.__version__, + author={{.project_name}}.__author__, + url="https://databricks.com", + author_email="john.doe@databricks.com", + description="my example wheel", + packages=find_packages(include=["{{.project_name}}"]), + entry_points={"group1": "run={{.project_name}}.__main__:main"}, + install_requires=["setuptools"], +) diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py new file mode 100644 index 000000000..909f1f322 --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__init__.py @@ -0,0 +1,2 @@ +__version__ = "0.0.1" +__author__ = "Databricks" diff --git a/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py new file mode 100644 index 000000000..ea918ce2d --- /dev/null +++ b/internal/bundle/bundles/python_wheel_task_with_environments/template/{{.project_name}}/__main__.py @@ -0,0 +1,16 @@ +""" +The entry point of the Python Wheel +""" + +import sys + + +def main(): + # This method will print the provided arguments + print("Hello from my func") + print("Got arguments:") + print(sys.argv) + + +if __name__ == "__main__": + main() diff --git a/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json b/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json new file mode 100644 index 000000000..078dff976 --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/databricks_template_schema.json @@ -0,0 +1,29 @@ +{ + "properties": { + "project_name": { + "type": "string", + "default": "my_java_project", + "description": "Unique name for this project" + }, + "spark_version": { + "type": "string", + "description": "Spark version used for job cluster" + }, + "node_type_id": { + "type": "string", + "description": "Node type id for job cluster" + }, + "unique_id": { + "type": "string", + "description": "Unique ID for job name" + }, + "root": { + "type": "string", + "description": "Path to the root of the template" + }, + "artifact_path": { + "type": "string", + "description": "Path to the remote base path for artifacts" + } + } +} diff --git a/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl b/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl new file mode 100644 index 000000000..24a6d7d8a --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/databricks.yml.tmpl @@ -0,0 +1,28 @@ +bundle: + name: spark-jar-task + +workspace: + root_path: "~/.bundle/{{.unique_id}}" + artifact_path: {{.artifact_path}} + +artifacts: + my_java_code: + path: ./{{.project_name}} + build: "javac PrintArgs.java && jar cvfm PrintArgs.jar META-INF/MANIFEST.MF PrintArgs.class" + files: + - source: ./{{.project_name}}/PrintArgs.jar + +resources: + jobs: + jar_job: + name: "[${bundle.target}] Test Spark Jar Job {{.unique_id}}" + 
tasks: + - task_key: TestSparkJarTask + new_cluster: + num_workers: 1 + spark_version: "{{.spark_version}}" + node_type_id: "{{.node_type_id}}" + spark_jar_task: + main_class_name: PrintArgs + libraries: + - jar: ./{{.project_name}}/PrintArgs.jar diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF new file mode 100644 index 000000000..40b023dbd --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/META-INF/MANIFEST.MF @@ -0,0 +1 @@ +Main-Class: PrintArgs \ No newline at end of file diff --git a/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java new file mode 100644 index 000000000..b7430f25f --- /dev/null +++ b/internal/bundle/bundles/spark_jar_task/template/{{.project_name}}/PrintArgs.java @@ -0,0 +1,8 @@ +import java.util.Arrays; + +public class PrintArgs { + public static void main(String[] args) { + System.out.println("Hello from Jar!"); + System.out.println(Arrays.toString(args)); + } +} diff --git a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl b/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl index 5d17e0fda..85d31ce3e 100644 --- a/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl +++ b/internal/bundle/bundles/with_includes/template/databricks.yml.tmpl @@ -4,5 +4,5 @@ bundle: workspace: root_path: "~/.bundle/{{.unique_id}}" -includes: - - resources/*yml +include: + - resources/*.yml diff --git a/internal/bundle/deploy_then_remove_resources_test.go b/internal/bundle/deploy_then_remove_resources_test.go index 73860593c..66ec5c16a 100644 --- a/internal/bundle/deploy_then_remove_resources_test.go +++ b/internal/bundle/deploy_then_remove_resources_test.go @@ -1,55 +1,65 @@ package bundle import ( - "context" "os" "path/filepath" "testing" "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAccBundleDeployThenRemoveResources(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "deploy_then_remove_resources", map[string]any{ - "unique_id": uniqueId, + bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, }) require.NoError(t, err) // deploy pipeline - err = deployBundle(t, bundleRoot) - require.NoError(t, err) - - w, err := databricks.NewWorkspaceClient() + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // assert pipeline is created pipelineName := "test-bundle-pipeline-" + uniqueId - pipeline, err := w.Pipelines.GetByName(context.Background(), pipelineName) + pipeline, err := w.Pipelines.GetByName(ctx, pipelineName) require.NoError(t, err) assert.Equal(t, pipeline.Name, pipelineName) + // assert job is created + jobName := "test-bundle-job-" + uniqueId + job, err := w.Jobs.GetBySettingsName(ctx, jobName) + require.NoError(t, err) + assert.Equal(t, job.Settings.Name, jobName) + // 
delete resources.yml err = os.Remove(filepath.Join(bundleRoot, "resources.yml")) require.NoError(t, err) // deploy again - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // assert pipeline is deleted - _, err = w.Pipelines.GetByName(context.Background(), pipelineName) + _, err = w.Pipelines.GetByName(ctx, pipelineName) + assert.ErrorContains(t, err, "does not exist") + + // assert job is deleted + _, err = w.Jobs.GetBySettingsName(ctx, jobName) assert.ErrorContains(t, err, "does not exist") t.Cleanup(func() { - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) }) } diff --git a/internal/bundle/deployment_state_test.go b/internal/bundle/deployment_state_test.go new file mode 100644 index 000000000..25f36d4a2 --- /dev/null +++ b/internal/bundle/deployment_state_test.go @@ -0,0 +1,102 @@ +package bundle + +import ( + "os" + "path" + "path/filepath" + "testing" + + "github.com/databricks/cli/bundle/deploy" + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccFilesAreSyncedCorrectlyWhenNoSnapshot(t *testing.T) { + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + nodeTypeId := internal.GetNodeTypeId(env) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{ + "unique_id": uniqueId, + "spark_version": "13.3.x-scala2.12", + "node_type_id": nodeTypeId, + }) + require.NoError(t, err) + + t.Setenv("BUNDLE_ROOT", bundleRoot) + + // Add some test file to the bundle + err = os.WriteFile(filepath.Join(bundleRoot, "test.py"), []byte("print('Hello, World!')"), 0644) + require.NoError(t, err) + + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), []byte("print('Hello, World!')"), 0644) + require.NoError(t, err) + + // Add notebook to the bundle + err = os.WriteFile(filepath.Join(bundleRoot, "notebook.py"), []byte("# Databricks notebook source\nHello, World!"), 0644) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) + + // Check that test file is in workspace + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) + require.NoError(t, err) + + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test_to_modify.py")) + require.NoError(t, err) + + // Check that notebook is in workspace + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "notebook")) + require.NoError(t, err) + + // Check that deployment.json is synced correctly + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "state", deploy.DeploymentStateFileName)) + require.NoError(t, err) + + // Remove .databricks directory to simulate a fresh deployment like in CI/CD environment + err = os.RemoveAll(filepath.Join(bundleRoot, ".databricks")) + require.NoError(t, err) + + // Remove the file from the bundle + err = os.Remove(filepath.Join(bundleRoot, "test.py")) + require.NoError(t, err) + + // Remove the notebook from the bundle and deploy again + err = os.Remove(filepath.Join(bundleRoot, "notebook.py")) + require.NoError(t, err) + + // Modify the content of another file + err = os.WriteFile(filepath.Join(bundleRoot, "test_to_modify.py"), 
[]byte("print('Modified!')"), 0644) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Check that removed file is not in workspace anymore + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "test.py")) + require.ErrorContains(t, err, "files/test.py") + require.ErrorContains(t, err, "doesn't exist") + + // Check that removed notebook is not in workspace anymore + _, err = w.Workspace.GetStatusByPath(ctx, path.Join(remoteRoot, "files", "notebook")) + require.ErrorContains(t, err, "files/notebook") + require.ErrorContains(t, err, "doesn't exist") + + // Check the content of modified file + content, err := w.Workspace.ReadFile(ctx, path.Join(remoteRoot, "files", "test_to_modify.py")) + require.NoError(t, err) + require.Equal(t, "print('Modified!')", string(content)) +} diff --git a/internal/bundle/destroy_test.go b/internal/bundle/destroy_test.go new file mode 100644 index 000000000..baccf4e6f --- /dev/null +++ b/internal/bundle/destroy_test.go @@ -0,0 +1,85 @@ +package bundle + +import ( + "errors" + "os" + "path/filepath" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/google/uuid" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleDestroy(t *testing.T) { + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + uniqueId := uuid.New().String() + bundleRoot, err := initTestTemplate(t, ctx, "deploy_then_remove_resources", map[string]any{ + "unique_id": uniqueId, + "node_type_id": nodeTypeId, + "spark_version": defaultSparkVersion, + }) + require.NoError(t, err) + + snapshotsDir := filepath.Join(bundleRoot, ".databricks", "bundle", "default", "sync-snapshots") + + // Assert the snapshot file does not exist + _, err = os.ReadDir(snapshotsDir) + assert.ErrorIs(t, err, os.ErrNotExist) + + // deploy resources + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // Assert the snapshot file exists + entries, err := os.ReadDir(snapshotsDir) + assert.NoError(t, err) + assert.Len(t, entries, 1) + + // Assert bundle deployment path is created + remoteRoot := getBundleRemoteRootPath(w, t, uniqueId) + _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot) + assert.NoError(t, err) + + // assert pipeline is created + pipelineName := "test-bundle-pipeline-" + uniqueId + pipeline, err := w.Pipelines.GetByName(ctx, pipelineName) + require.NoError(t, err) + assert.Equal(t, pipeline.Name, pipelineName) + + // assert job is created + jobName := "test-bundle-job-" + uniqueId + job, err := w.Jobs.GetBySettingsName(ctx, jobName) + require.NoError(t, err) + assert.Equal(t, job.Settings.Name, jobName) + + // destroy bundle + err = destroyBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + // assert pipeline is deleted + _, err = w.Pipelines.GetByName(ctx, pipelineName) + assert.ErrorContains(t, err, "does not exist") + + // assert job is deleted + _, err = w.Jobs.GetBySettingsName(ctx, jobName) + assert.ErrorContains(t, err, "does not exist") + + // Assert snapshot file is deleted + entries, err = os.ReadDir(snapshotsDir) + require.NoError(t, err) + assert.Len(t, entries, 0) + + // Assert bundle deployment path is deleted + _, err = w.Workspace.GetStatusByPath(ctx, remoteRoot) + apiErr := &apierr.APIError{} + assert.True(t, errors.As(err, &apiErr)) + assert.Equal(t, 
"RESOURCE_DOES_NOT_EXIST", apiErr.ErrorCode) +} diff --git a/internal/bundle/empty_bundle_test.go b/internal/bundle/empty_bundle_test.go index 9b39368f4..36883ae00 100644 --- a/internal/bundle/empty_bundle_test.go +++ b/internal/bundle/empty_bundle_test.go @@ -6,14 +6,13 @@ import ( "path/filepath" "testing" - "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func TestAccEmptyBundleDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, _ := acc.WorkspaceTest(t) // create empty bundle tmpDir := t.TempDir() @@ -27,11 +26,11 @@ func TestAccEmptyBundleDeploy(t *testing.T) { f.Close() // deploy empty bundle - err = deployBundle(t, tmpDir) + err = deployBundle(t, ctx, tmpDir) require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, tmpDir) + err = destroyBundle(t, ctx, tmpDir) require.NoError(t, err) }) } diff --git a/internal/bundle/environments_test.go b/internal/bundle/environments_test.go new file mode 100644 index 000000000..5cffe8857 --- /dev/null +++ b/internal/bundle/environments_test.go @@ -0,0 +1,39 @@ +package bundle + +import ( + "testing" + + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestAccPythonWheelTaskWithEnvironmentsDeployAndRun(t *testing.T) { + t.Skip("Skipping test until serveless is enabled") + + ctx, _ := acc.WorkspaceTest(t) + + bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task_with_environments", map[string]any{ + "unique_id": uuid.New().String(), + }) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + out, err := runResource(t, ctx, bundleRoot, "some_other_job") + require.NoError(t, err) + require.Contains(t, out, "Hello from my func") + require.Contains(t, out, "Got arguments:") + require.Contains(t, out, "['my_test_code', 'one', 'two']") + + out, err = runResourceWithParams(t, ctx, bundleRoot, "some_other_job", "--python-params=param1,param2") + require.NoError(t, err) + require.Contains(t, out, "Hello from my func") + require.Contains(t, out, "Got arguments:") + require.Contains(t, out, "['my_test_code', 'param1', 'param2']") +} diff --git a/internal/bundle/generate_job_test.go b/internal/bundle/generate_job_test.go index e9445abc5..847a7a14e 100644 --- a/internal/bundle/generate_job_test.go +++ b/internal/bundle/generate_job_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" @@ -20,23 +21,22 @@ import ( ) func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + gt := &generateJobTest{T: t, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "with_includes", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) require.NoError(t, err) - jobId := createTestJob(t) + jobId := gt.createTestJob(ctx) t.Cleanup(func() { - destroyJob(t, jobId) - require.NoError(t, err) + gt.destroyJob(ctx, jobId) }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "generate", "job", + c := 
internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "job", "--existing-job-id", fmt.Sprint(jobId), "--config-dir", filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) @@ -61,15 +61,22 @@ func TestAccGenerateFromExistingJobAndDeploy(t *testing.T) { require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12") require.Contains(t, generatedYaml, "num_workers: 1") - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) - } -func createTestJob(t *testing.T) int64 { +type generateJobTest struct { + T *testing.T + w *databricks.WorkspaceClient +} + +func (gt *generateJobTest) createTestJob(ctx context.Context) int64 { + t := gt.T + w := gt.w + var nodeTypeId string switch testutil.GetCloud(t) { case testutil.AWS: @@ -80,10 +87,6 @@ func createTestJob(t *testing.T) int64 { nodeTypeId = "n1-standard-4" } - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() tmpdir := internal.TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -100,6 +103,11 @@ func createTestJob(t *testing.T) int64 { SparkVersion: "13.3.x-scala2.12", NumWorkers: 1, NodeTypeId: nodeTypeId, + SparkConf: map[string]string{ + "spark.databricks.enableWsfs": "true", + "spark.databricks.hive.metastore.glueCatalog.enabled": "true", + "spark.databricks.pip.ignoreSSL": "true", + }, }, NotebookTask: &jobs.NotebookTask{ NotebookPath: path.Join(tmpdir, "test"), @@ -112,13 +120,9 @@ func createTestJob(t *testing.T) int64 { return resp.JobId } -func destroyJob(t *testing.T, jobId int64) { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - err = w.Jobs.Delete(ctx, jobs.DeleteJob{ +func (gt *generateJobTest) destroyJob(ctx context.Context, jobId int64) { + err := gt.w.Jobs.Delete(ctx, jobs.DeleteJob{ JobId: jobId, }) - require.NoError(t, err) + require.NoError(gt.T, err) } diff --git a/internal/bundle/generate_pipeline_test.go b/internal/bundle/generate_pipeline_test.go index 7b2323e6a..82467952d 100644 --- a/internal/bundle/generate_pipeline_test.go +++ b/internal/bundle/generate_pipeline_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/pipelines" @@ -18,23 +19,22 @@ import ( ) func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + gt := &generatePipelineTest{T: t, w: wt.W} uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "with_includes", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "with_includes", map[string]any{ "unique_id": uniqueId, }) require.NoError(t, err) - pipelineId := createTestPipeline(t) + pipelineId, name := gt.createTestPipeline(ctx) t.Cleanup(func() { - destroyPipeline(t, pipelineId) - require.NoError(t, err) + gt.destroyPipeline(ctx, pipelineId) }) t.Setenv("BUNDLE_ROOT", bundleRoot) - c := internal.NewCobraTestRunner(t, "bundle", "generate", "pipeline", + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "generate", "pipeline", "--existing-pipeline-id", fmt.Sprint(pipelineId), "--config-dir", 
filepath.Join(bundleRoot, "resources"), "--source-dir", filepath.Join(bundleRoot, "src")) @@ -52,27 +52,38 @@ func TestAccGenerateFromExistingPipelineAndDeploy(t *testing.T) { require.Len(t, matches, 1) // check the content of generated yaml - data, err := os.ReadFile(matches[0]) + fileName := matches[0] + data, err := os.ReadFile(fileName) require.NoError(t, err) generatedYaml := string(data) + + // Replace pipeline name + generatedYaml = strings.ReplaceAll(generatedYaml, name, internal.RandomName("copy-generated-pipeline-")) + err = os.WriteFile(fileName, []byte(generatedYaml), 0644) + require.NoError(t, err) + require.Contains(t, generatedYaml, "libraries:") require.Contains(t, generatedYaml, "- notebook:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "notebook.py"))) require.Contains(t, generatedYaml, "- file:") require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py"))) - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) } -func createTestPipeline(t *testing.T) string { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) +type generatePipelineTest struct { + T *testing.T + w *databricks.WorkspaceClient +} + +func (gt *generatePipelineTest) createTestPipeline(ctx context.Context) (string, string) { + t := gt.T + w := gt.w - ctx := context.Background() tmpdir := internal.TemporaryWorkspaceDir(t, w) f, err := filer.NewWorkspaceFilesClient(w, tmpdir) require.NoError(t, err) @@ -83,8 +94,12 @@ func createTestPipeline(t *testing.T) string { err = f.Write(ctx, "test.py", strings.NewReader("print('Hello!')")) require.NoError(t, err) + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + nodeTypeId := internal.GetNodeTypeId(env) + + name := internal.RandomName("generated-pipeline-") resp, err := w.Pipelines.Create(ctx, pipelines.CreatePipeline{ - Name: internal.RandomName("generated-pipeline-"), + Name: name, Libraries: []pipelines.PipelineLibrary{ { Notebook: &pipelines.NotebookLibrary{ @@ -97,19 +112,31 @@ func createTestPipeline(t *testing.T) string { }, }, }, + Clusters: []pipelines.PipelineCluster{ + { + CustomTags: map[string]string{ + "Tag1": "Yes", + "Tag2": "24X7", + "Tag3": "APP-1234", + }, + NodeTypeId: nodeTypeId, + NumWorkers: 2, + SparkConf: map[string]string{ + "spark.databricks.enableWsfs": "true", + "spark.databricks.hive.metastore.glueCatalog.enabled": "true", + "spark.databricks.pip.ignoreSSL": "true", + }, + }, + }, }) require.NoError(t, err) - return resp.PipelineId + return resp.PipelineId, name } -func destroyPipeline(t *testing.T, pipelineId string) { - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - ctx := context.Background() - err = w.Pipelines.Delete(ctx, pipelines.DeletePipelineRequest{ +func (gt *generatePipelineTest) destroyPipeline(ctx context.Context, pipelineId string) { + err := gt.w.Pipelines.Delete(ctx, pipelines.DeletePipelineRequest{ PipelineId: pipelineId, }) - require.NoError(t, err) + require.NoError(gt.T, err) } diff --git a/internal/bundle/helpers.go b/internal/bundle/helpers.go index 681edc2d6..c33c15331 100644 --- a/internal/bundle/helpers.go +++ b/internal/bundle/helpers.go @@ -3,6 +3,7 @@ package bundle import ( "context" "encoding/json" + "fmt" "os" "path/filepath" "strings" @@ -13,19 +14,27 @@ import ( "github.com/databricks/cli/libs/cmdio" 
"github.com/databricks/cli/libs/flags" "github.com/databricks/cli/libs/template" + "github.com/databricks/databricks-sdk-go" + "github.com/stretchr/testify/require" ) -func initTestTemplate(t *testing.T, templateName string, config map[string]any) (string, error) { +const defaultSparkVersion = "13.3.x-snapshot-scala2.12" + +func initTestTemplate(t *testing.T, ctx context.Context, templateName string, config map[string]any) (string, error) { + bundleRoot := t.TempDir() + return initTestTemplateWithBundleRoot(t, ctx, templateName, config, bundleRoot) +} + +func initTestTemplateWithBundleRoot(t *testing.T, ctx context.Context, templateName string, config map[string]any, bundleRoot string) (string, error) { templateRoot := filepath.Join("bundles", templateName) - bundleRoot := t.TempDir() configFilePath, err := writeConfigFile(t, config) if err != nil { return "", err } - ctx := root.SetWorkspaceClient(context.Background(), nil) - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "bundles") + ctx = root.SetWorkspaceClient(ctx, nil) + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "bundles") ctx = cmdio.InContext(ctx, cmd) err = template.Materialize(ctx, configFilePath, templateRoot, bundleRoot) @@ -46,15 +55,30 @@ func writeConfigFile(t *testing.T, config map[string]any) (string, error) { return filepath, err } -func deployBundle(t *testing.T, path string) error { +func validateBundle(t *testing.T, ctx context.Context, path string) ([]byte, error) { t.Setenv("BUNDLE_ROOT", path) - c := internal.NewCobraTestRunner(t, "bundle", "deploy", "--force-lock") + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "validate", "--output", "json") + stdout, _, err := c.Run() + return stdout.Bytes(), err +} + +func deployBundle(t *testing.T, ctx context.Context, path string) error { + t.Setenv("BUNDLE_ROOT", path) + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "deploy", "--force-lock") _, _, err := c.Run() return err } -func runResource(t *testing.T, path string, key string) (string, error) { - ctx := context.Background() +func deployBundleWithFlags(t *testing.T, ctx context.Context, path string, flags []string) error { + t.Setenv("BUNDLE_ROOT", path) + args := []string{"bundle", "deploy", "--force-lock"} + args = append(args, flags...) + c := internal.NewCobraTestRunnerWithContext(t, ctx, args...) 
+ _, _, err := c.Run() + return err +} + +func runResource(t *testing.T, ctx context.Context, path string, key string) (string, error) { ctx = cmdio.NewContext(ctx, cmdio.Default()) c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "run", key) @@ -62,8 +86,7 @@ func runResource(t *testing.T, path string, key string) (string, error) { return stdout.String(), err } -func runResourceWithParams(t *testing.T, path string, key string, params ...string) (string, error) { - ctx := context.Background() +func runResourceWithParams(t *testing.T, ctx context.Context, path string, key string, params ...string) (string, error) { ctx = cmdio.NewContext(ctx, cmdio.Default()) args := make([]string, 0) @@ -74,9 +97,17 @@ func runResourceWithParams(t *testing.T, path string, key string, params ...stri return stdout.String(), err } -func destroyBundle(t *testing.T, path string) error { +func destroyBundle(t *testing.T, ctx context.Context, path string) error { t.Setenv("BUNDLE_ROOT", path) - c := internal.NewCobraTestRunner(t, "bundle", "destroy", "--auto-approve") + c := internal.NewCobraTestRunnerWithContext(t, ctx, "bundle", "destroy", "--auto-approve") _, _, err := c.Run() return err } + +func getBundleRemoteRootPath(w *databricks.WorkspaceClient, t *testing.T, uniqueId string) string { + // Compute root path for the bundle deployment + me, err := w.CurrentUser.Me(context.Background()) + require.NoError(t, err) + root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) + return root +} diff --git a/internal/bundle/job_metadata_test.go b/internal/bundle/job_metadata_test.go index 3e2bb7f03..21f1086ae 100644 --- a/internal/bundle/job_metadata_test.go +++ b/internal/bundle/job_metadata_test.go @@ -3,7 +3,6 @@ package bundle import ( "context" "encoding/json" - "fmt" "io" "path" "strconv" @@ -12,36 +11,34 @@ import ( "github.com/databricks/cli/bundle/config" "github.com/databricks/cli/bundle/metadata" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/google/uuid" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestAccJobsMetadataFile(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() - bundleRoot, err := initTestTemplate(t, "job_metadata", map[string]any{ + bundleRoot, err := initTestTemplate(t, ctx, "job_metadata", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": defaultSparkVersion, }) require.NoError(t, err) // deploy bundle - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) // Cleanup the deployed bundle t.Cleanup(func() { - err = destroyBundle(t, bundleRoot) + err = destroyBundle(t, ctx, bundleRoot) require.NoError(t, err) }) @@ -58,9 +55,7 @@ func TestAccJobsMetadataFile(t *testing.T) { assert.Equal(t, job2.Settings.Name, jobName) // Compute root path for the bundle deployment - me, err := w.CurrentUser.Me(context.Background()) - require.NoError(t, err) - root := fmt.Sprintf("/Users/%s/.bundle/%s", me.UserName, uniqueId) + root := getBundleRemoteRootPath(w, t, uniqueId) 
f, err := filer.NewWorkspaceFilesClient(w, root) require.NoError(t, err) diff --git a/internal/bundle/local_state_staleness_test.go b/internal/bundle/local_state_staleness_test.go index 06cfe0e0d..d11234667 100644 --- a/internal/bundle/local_state_staleness_test.go +++ b/internal/bundle/local_state_staleness_test.go @@ -5,7 +5,8 @@ import ( "testing" "github.com/databricks/cli/internal" - "github.com/databricks/databricks-sdk-go" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/service/jobs" "github.com/google/uuid" @@ -14,11 +15,8 @@ import ( ) func TestAccLocalStateStaleness(t *testing.T) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + ctx, wt := acc.WorkspaceTest(t) + w := wt.W // The approach for this test is as follows: // 1) First deploy of bundle instance A @@ -27,37 +25,39 @@ func TestAccLocalStateStaleness(t *testing.T) { // Because of deploy (2), the locally cached state of bundle instance A should be stale. // Then for deploy (3), it must use the remote state over the stale local state. - nodeTypeId := internal.GetNodeTypeId(env) + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) uniqueId := uuid.New().String() initialize := func() string { - root, err := initTestTemplate(t, "basic", map[string]any{ + root, err := initTestTemplate(t, ctx, "basic", map[string]any{ "unique_id": uniqueId, "node_type_id": nodeTypeId, - "spark_version": "13.2.x-snapshot-scala2.12", + "spark_version": defaultSparkVersion, }) require.NoError(t, err) t.Cleanup(func() { - err = destroyBundle(t, root) + err = destroyBundle(t, ctx, root) require.NoError(t, err) }) return root } + var err error + bundleA := initialize() bundleB := initialize() // 1) Deploy bundle A - err = deployBundle(t, bundleA) + err = deployBundle(t, ctx, bundleA) require.NoError(t, err) // 2) Deploy bundle B - err = deployBundle(t, bundleB) + err = deployBundle(t, ctx, bundleB) require.NoError(t, err) // 3) Deploy bundle A again - err = deployBundle(t, bundleA) + err = deployBundle(t, ctx, bundleA) require.NoError(t, err) // Assert that there is only a single job in the workspace corresponding to this bundle. 
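The refactor above threads the context returned by acc.WorkspaceTest through the bundle test helpers (initTestTemplate, deployBundle, runResource, runResourceWithParams, destroyBundle). Below is a minimal sketch of how a new acceptance test could compose these helpers; it assumes the existing "basic" template, and the "example_job" resource key is a placeholder rather than something added by this change.

package bundle

import (
	"testing"

	"github.com/databricks/cli/internal"
	"github.com/databricks/cli/internal/acc"
	"github.com/databricks/cli/libs/env"
	"github.com/google/uuid"
	"github.com/stretchr/testify/require"
)

func TestAccExampleBundleDeployAndRun(t *testing.T) {
	// Acquire a workspace client and a context carrying the test environment.
	ctx, _ := acc.WorkspaceTest(t)

	// Materialize the "basic" template into a temporary bundle root.
	nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV"))
	bundleRoot, err := initTestTemplate(t, ctx, "basic", map[string]any{
		"unique_id":     uuid.New().String(),
		"node_type_id":  nodeTypeId,
		"spark_version": defaultSparkVersion,
	})
	require.NoError(t, err)

	// Deploy the bundle and make sure it is destroyed when the test ends.
	err = deployBundle(t, ctx, bundleRoot)
	require.NoError(t, err)
	t.Cleanup(func() {
		destroyBundle(t, ctx, bundleRoot)
	})

	// Run a resource defined by the template ("example_job" is a placeholder key).
	out, err := runResource(t, ctx, bundleRoot, "example_job")
	require.NoError(t, err)
	require.NotEmpty(t, out)
}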
diff --git a/internal/bundle/python_wheel_test.go b/internal/bundle/python_wheel_test.go index c94ed93a3..bf2462920 100644 --- a/internal/bundle/python_wheel_test.go +++ b/internal/bundle/python_wheel_test.go @@ -4,24 +4,17 @@ import ( "testing" "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/databricks/cli/libs/env" "github.com/google/uuid" "github.com/stretchr/testify/require" ) func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bool) { - env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") - t.Log(env) + ctx, _ := acc.WorkspaceTest(t) - var nodeTypeId string - if env == "gcp" { - nodeTypeId = "n1-standard-4" - } else if env == "aws" { - nodeTypeId = "i3.xlarge" - } else { - nodeTypeId = "Standard_DS4_v2" - } - - bundleRoot, err := initTestTemplate(t, "python_wheel_task", map[string]any{ + nodeTypeId := internal.GetNodeTypeId(env.Get(ctx, "CLOUD_ENV")) + bundleRoot, err := initTestTemplate(t, ctx, "python_wheel_task", map[string]any{ "node_type_id": nodeTypeId, "unique_id": uuid.New().String(), "spark_version": sparkVersion, @@ -29,20 +22,20 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo }) require.NoError(t, err) - err = deployBundle(t, bundleRoot) + err = deployBundle(t, ctx, bundleRoot) require.NoError(t, err) t.Cleanup(func() { - destroyBundle(t, bundleRoot) + destroyBundle(t, ctx, bundleRoot) }) - out, err := runResource(t, bundleRoot, "some_other_job") + out, err := runResource(t, ctx, bundleRoot, "some_other_job") require.NoError(t, err) require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") require.Contains(t, out, "['my_test_code', 'one', 'two']") - out, err = runResourceWithParams(t, bundleRoot, "some_other_job", "--python-params=param1,param2") + out, err = runResourceWithParams(t, ctx, bundleRoot, "some_other_job", "--python-params=param1,param2") require.NoError(t, err) require.Contains(t, out, "Hello from my func") require.Contains(t, out, "Got arguments:") @@ -50,7 +43,7 @@ func runPythonWheelTest(t *testing.T, sparkVersion string, pythonWheelWrapper bo } func TestAccPythonWheelTaskDeployAndRunWithoutWrapper(t *testing.T) { - runPythonWheelTest(t, "13.2.x-snapshot-scala2.12", false) + runPythonWheelTest(t, "13.3.x-snapshot-scala2.12", false) } func TestAccPythonWheelTaskDeployAndRunWithWrapper(t *testing.T) { diff --git a/internal/bundle/spark_jar_test.go b/internal/bundle/spark_jar_test.go new file mode 100644 index 000000000..c981e7750 --- /dev/null +++ b/internal/bundle/spark_jar_test.go @@ -0,0 +1,52 @@ +package bundle + +import ( + "os" + "testing" + + "github.com/databricks/cli/internal" + "github.com/databricks/cli/internal/acc" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func runSparkJarTest(t *testing.T, sparkVersion string) { + t.Skip("Temporarily skipping the test until auth / permission issues for UC volumes are resolved.") + + env := internal.GetEnvOrSkipTest(t, "CLOUD_ENV") + t.Log(env) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + ctx, wt := acc.WorkspaceTest(t) + w := wt.W + volumePath := internal.TemporaryUcVolume(t, w) + + nodeTypeId := internal.GetNodeTypeId(env) + tmpDir := t.TempDir() + bundleRoot, err := initTestTemplateWithBundleRoot(t, ctx, "spark_jar_task", map[string]any{ + "node_type_id": nodeTypeId, + "unique_id": uuid.New().String(), + "spark_version": sparkVersion, + "root": tmpDir, + 
"artifact_path": volumePath, + }, tmpDir) + require.NoError(t, err) + + err = deployBundle(t, ctx, bundleRoot) + require.NoError(t, err) + + t.Cleanup(func() { + destroyBundle(t, ctx, bundleRoot) + }) + + out, err := runResource(t, ctx, bundleRoot, "jar_job") + require.NoError(t, err) + require.Contains(t, out, "Hello from Jar!") +} + +func TestAccSparkJarTaskDeployAndRunOnVolumes(t *testing.T) { + runSparkJarTest(t, "14.3.x-scala2.12") +} diff --git a/internal/bundle/validate_test.go b/internal/bundle/validate_test.go new file mode 100644 index 000000000..18da89e4c --- /dev/null +++ b/internal/bundle/validate_test.go @@ -0,0 +1,60 @@ +package bundle + +import ( + "context" + "encoding/json" + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/convert" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestAccBundleValidate(t *testing.T) { + testutil.GetEnvOrSkipTest(t, "CLOUD_ENV") + + tmpDir := t.TempDir() + testutil.WriteFile(t, + ` +bundle: + name: "foobar" + +resources: + jobs: + outer_loop: + name: outer loop + tasks: + - task_key: my task + run_job_task: + job_id: ${resources.jobs.inner_loop.id} + + inner_loop: + name: inner loop + +`, tmpDir, "databricks.yml") + + ctx := context.Background() + stdout, err := validateBundle(t, ctx, tmpDir) + require.NoError(t, err) + + config := make(map[string]any) + err = json.Unmarshal(stdout, &config) + require.NoError(t, err) + + getValue := func(key string) any { + v, err := convert.FromTyped(config, dyn.NilValue) + require.NoError(t, err) + v, err = dyn.GetByPath(v, dyn.MustPathFromString(key)) + require.NoError(t, err) + return v.AsAny() + } + + assert.Equal(t, "foobar", getValue("bundle.name")) + assert.Equal(t, "outer loop", getValue("resources.jobs.outer_loop.name")) + assert.Equal(t, "inner loop", getValue("resources.jobs.inner_loop.name")) + assert.Equal(t, "my task", getValue("resources.jobs.outer_loop.tasks[0].task_key")) + // Assert resource references are retained in the output. 
+ assert.Equal(t, "${resources.jobs.inner_loop.id}", getValue("resources.jobs.outer_loop.tasks[0].run_job_task.job_id")) +} diff --git a/internal/clusters_test.go b/internal/clusters_test.go index e8208d047..6daddcce3 100644 --- a/internal/clusters_test.go +++ b/internal/clusters_test.go @@ -36,5 +36,5 @@ func TestAccClustersGet(t *testing.T) { func TestClusterCreateErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "clusters", "create") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } diff --git a/internal/filer_test.go b/internal/filer_test.go index b1af6886c..275304256 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -3,17 +3,17 @@ package internal import ( "bytes" "context" + "encoding/json" "errors" + "fmt" "io" "io/fs" - "net/http" + "path" "regexp" "strings" "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -40,15 +40,118 @@ func (f filerTest) assertContents(ctx context.Context, name string, contents str assert.Equal(f, contents, body.String()) } -func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { +func (f filerTest) assertContentsJupyter(ctx context.Context, name string) { + reader, err := f.Read(ctx, name) + if !assert.NoError(f, err) { + return + } + + defer reader.Close() + + var body bytes.Buffer + _, err = io.Copy(&body, reader) + if !assert.NoError(f, err) { + return + } + + var actual map[string]any + err = json.Unmarshal(body.Bytes(), &actual) + if !assert.NoError(f, err) { + return + } + + // Since a roundtrip to the workspace changes a Jupyter notebook's payload, + // the best we can do is assert that the nbformat is correct. + assert.EqualValues(f, 4, actual["nbformat"]) +} + +func (f filerTest) assertNotExists(ctx context.Context, name string) { + _, err := f.Stat(ctx, name) + assert.ErrorIs(f, err, fs.ErrNotExist) +} + +func commonFilerRecursiveDeleteTest(t *testing.T, ctx context.Context, f filer.Filer) { var err error - // Write should fail because the root path doesn't yet exist. 
+ err = f.Write(ctx, "dir/file1", strings.NewReader("content1"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/file1", `content1`) + + err = f.Write(ctx, "dir/file2", strings.NewReader("content2"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/file2", `content2`) + + err = f.Write(ctx, "dir/subdir1/file3", strings.NewReader("content3"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir1/file3", `content3`) + + err = f.Write(ctx, "dir/subdir1/file4", strings.NewReader("content4"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir1/file4", `content4`) + + err = f.Write(ctx, "dir/subdir2/file5", strings.NewReader("content5"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir2/file5", `content5`) + + err = f.Write(ctx, "dir/subdir2/file6", strings.NewReader("content6"), filer.CreateParentDirectories) + require.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "dir/subdir2/file6", `content6`) + + entriesBeforeDelete, err := f.ReadDir(ctx, "dir") + require.NoError(t, err) + assert.Len(t, entriesBeforeDelete, 4) + + names := []string{} + for _, e := range entriesBeforeDelete { + names = append(names, e.Name()) + } + assert.Equal(t, names, []string{"file1", "file2", "subdir1", "subdir2"}) + + err = f.Delete(ctx, "dir") + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) + + err = f.Delete(ctx, "dir", filer.DeleteRecursively) + assert.NoError(t, err) + _, err = f.ReadDir(ctx, "dir") + assert.ErrorAs(t, err, &filer.NoSuchDirectoryError{}) +} + +func TestAccFilerRecursiveDelete(t *testing.T) { + t.Parallel() + + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, + } { + tc := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + // Common tests we run across all filers to ensure consistent behavior. + commonFilerRecursiveDeleteTest(t, ctx, f) + }) + } +} + +// Common tests we run across all filers to ensure consistent behavior. +func commonFilerReadWriteTests(t *testing.T, ctx context.Context, f filer.Filer) { + var err error + + // Write should fail because the intermediate directory doesn't exist. err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) assert.True(t, errors.Is(err, fs.ErrNotExist)) - // Read should fail because the root path doesn't yet exist. + // Read should fail because the intermediate directory doesn't yet exist. _, err = f.Read(ctx, "/foo/bar") assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) assert.True(t, errors.Is(err, fs.ErrNotExist)) @@ -96,12 +199,12 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { // Delete should fail if the file doesn't exist. err = f.Delete(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) assert.True(t, errors.Is(err, fs.ErrNotExist)) // Stat should fail if the file doesn't exist. 
_, err = f.Stat(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.ErrorAs(t, err, &filer.FileDoesNotExistError{}) assert.True(t, errors.Is(err, fs.ErrNotExist)) // Delete should succeed for file that does exist. @@ -110,7 +213,7 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { // Delete should fail for a non-empty directory. err = f.Delete(ctx, "/foo") - assert.True(t, errors.As(err, &filer.DirectoryNotEmptyError{})) + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) assert.True(t, errors.Is(err, fs.ErrInvalid)) // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. @@ -124,7 +227,34 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.True(t, errors.Is(err, fs.ErrInvalid)) } -func runFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { +func TestAccFilerReadWrite(t *testing.T) { + t.Parallel() + + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, + } { + tc := testCase + + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + // Common tests we run across all filers to ensure consistent behavior. + commonFilerReadWriteTests(t, ctx, f) + }) + } +} + +// Common tests we run across all filers to ensure consistent behavior. +func commonFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { var err error var info fs.FileInfo @@ -206,54 +336,29 @@ func runFilerReadDirTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.False(t, entries[0].IsDir()) } -func setupWorkspaceFilesTest(t *testing.T) (context.Context, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFilerReadDir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryWorkspaceDir(t, w) - f, err := filer.NewWorkspaceFilesClient(w, tmpdir) - require.NoError(t, err) + for _, testCase := range []struct { + name string + f func(t *testing.T) (filer.Filer, string) + }{ + {"local", setupLocalFiler}, + {"workspace files", setupWsfsFiler}, + {"dbfs", setupDbfsFiler}, + {"files", setupUcVolumesFiler}, + {"workspace files extensions", setupWsfsExtensionsFiler}, + } { + tc := testCase - // Check if we can use this API here, skip test if we cannot. 
- _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") - var aerr *apierr.APIError - if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { - t.Skip(aerr.Message) + t.Run(testCase.name, func(t *testing.T) { + t.Parallel() + f, _ := tc.f(t) + ctx := context.Background() + + commonFilerReadDirTest(t, ctx, f) + }) } - - return ctx, f -} - -func TestAccFilerWorkspaceFilesReadWrite(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) - runFilerReadWriteTest(t, ctx, f) -} - -func TestAccFilerWorkspaceFilesReadDir(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) - runFilerReadDirTest(t, ctx, f) -} - -func setupFilerDbfsTest(t *testing.T) (context.Context, filer.Filer) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := TemporaryDbfsDir(t, w) - f, err := filer.NewDbfsClient(w, tmpdir) - require.NoError(t, err) - return ctx, f -} - -func TestAccFilerDbfsReadWrite(t *testing.T) { - ctx, f := setupFilerDbfsTest(t) - runFilerReadWriteTest(t, ctx, f) -} - -func TestAccFilerDbfsReadDir(t *testing.T) { - ctx, f := setupFilerDbfsTest(t) - runFilerReadDirTest(t, ctx, f) } var jupyterNotebookContent1 = ` @@ -305,7 +410,10 @@ var jupyterNotebookContent2 = ` ` func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) + t.Parallel() + + f, _ := setupWsfsFiler(t) + ctx := context.Background() var err error // Upload the notebooks @@ -350,7 +458,10 @@ func TestAccFilerWorkspaceNotebookConflict(t *testing.T) { } func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { - ctx, f := setupWorkspaceFilesTest(t) + t.Parallel() + + f, _ := setupWsfsFiler(t) + ctx := context.Background() var err error // Upload notebooks @@ -392,139 +503,330 @@ func TestAccFilerWorkspaceNotebookWithOverwriteFlag(t *testing.T) { filerTest{t, f}.assertContents(ctx, "jupyterNb", "# Databricks notebook source\nprint(\"Jupyter Notebook Version 2\")") } -func setupFilerLocalTest(t *testing.T) (context.Context, filer.Filer) { - ctx := context.Background() - f, err := filer.NewLocalClient(t.TempDir()) - require.NoError(t, err) - return ctx, f -} +func TestAccFilerWorkspaceFilesExtensionsReadDir(t *testing.T) { + t.Parallel() -func TestAccFilerLocalReadWrite(t *testing.T) { - ctx, f := setupFilerLocalTest(t) - runFilerReadWriteTest(t, ctx, f) -} + files := []struct { + name string + content string + }{ + {"dir1/dir2/dir3/file.txt", "file content"}, + {"dir1/notebook.py", "# Databricks notebook source\nprint('first upload'))"}, + {"foo.py", "print('foo')"}, + {"foo.r", "print('foo')"}, + {"foo.scala", "println('foo')"}, + {"foo.sql", "SELECT 'foo'"}, + {"jupyterNb.ipynb", jupyterNotebookContent1}, + {"jupyterNb2.ipynb", jupyterNotebookContent2}, + {"pyNb.py", "# Databricks notebook source\nprint('first upload'))"}, + {"rNb.r", "# Databricks notebook source\nprint('first upload'))"}, + {"scalaNb.scala", "// Databricks notebook source\n println(\"first upload\"))"}, + {"sqlNb.sql", "-- Databricks notebook source\n SELECT \"first upload\""}, + } -func TestAccFilerLocalReadDir(t *testing.T) { - ctx, f := setupFilerLocalTest(t) - runFilerReadDirTest(t, ctx, f) -} - -func temporaryVolumeDir(t *testing.T, w *databricks.WorkspaceClient) string { - // Assume this test is run against the internal testing workspace. 
- path := RandomName("/Volumes/bogdanghita/default/v3_shared/cli-testing/integration-test-filer-") - - // The Files API doesn't include support for creating and removing directories yet. - // Directories are created implicitly by writing a file to a path that doesn't exist. - // We therefore assume we can use the specified path without creating it first. - t.Logf("using dbfs:%s", path) - - return path -} - -func setupFilerFilesApiTest(t *testing.T) (context.Context, filer.Filer) { - t.SkipNow() // until available on prod - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + // Assert that every file has a unique basename + basenames := map[string]struct{}{} + for _, f := range files { + basename := path.Base(f.name) + if _, ok := basenames[basename]; ok { + t.Fatalf("basename %s is not unique", basename) + } + basenames[basename] = struct{}{} + } ctx := context.Background() - w := databricks.Must(databricks.NewWorkspaceClient()) - tmpdir := temporaryVolumeDir(t, w) - f, err := filer.NewFilesClient(w, tmpdir) + wf, _ := setupWsfsExtensionsFiler(t) + + for _, f := range files { + err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) + require.NoError(t, err) + } + + // Read entries + entries, err := wf.ReadDir(ctx, ".") require.NoError(t, err) - return ctx, f + names := []string{} + for _, e := range entries { + names = append(names, e.Name()) + } + assert.Equal(t, []string{ + "dir1", + "foo.py", + "foo.r", + "foo.scala", + "foo.sql", + "jupyterNb.ipynb", + "jupyterNb2.ipynb", + "pyNb.py", + "rNb.r", + "scalaNb.scala", + "sqlNb.sql", + }, names) + + // Read entries in subdirectory + entries, err = wf.ReadDir(ctx, "dir1") + require.NoError(t, err) + names = []string{} + for _, e := range entries { + names = append(names, e.Name()) + } + assert.Equal(t, []string{ + "dir2", + "notebook.py", + }, names) } -func TestAccFilerFilesApiReadWrite(t *testing.T) { - ctx, f := setupFilerFilesApiTest(t) +func setupFilerWithExtensionsTest(t *testing.T) filer.Filer { + files := []struct { + name string + content string + }{ + {"foo.py", "# Databricks notebook source\nprint('first upload'))"}, + {"bar.py", "print('foo')"}, + {"jupyter.ipynb", jupyterNotebookContent1}, + {"pretender", "not a notebook"}, + {"dir/file.txt", "file content"}, + {"scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')"}, + } - // The Files API doesn't know about directories yet. - // Below is a copy of [runFilerReadWriteTest] with - // assertions that don't work commented out. + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) - var err error + for _, f := range files { + err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) + require.NoError(t, err) + } - // Write should fail because the root path doesn't yet exist. - // err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) - // assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) - // assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Read should fail because the root path doesn't yet exist. - _, err = f.Read(ctx, "/foo/bar") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Read should fail because the path points to a directory - // err = f.Mkdir(ctx, "/dir") - // require.NoError(t, err) - // _, err = f.Read(ctx, "/dir") - // assert.ErrorIs(t, err, fs.ErrInvalid) - - // Write with CreateParentDirectories flag should succeed. 
- err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`), filer.CreateParentDirectories) - assert.NoError(t, err) - filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello world`) - - // Write should fail because there is an existing file at the specified path. - err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`)) - assert.True(t, errors.As(err, &filer.FileAlreadyExistsError{})) - assert.True(t, errors.Is(err, fs.ErrExist)) - - // Write with OverwriteIfExists should succeed. - err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`), filer.OverwriteIfExists) - assert.NoError(t, err) - filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello universe`) - - // Write should succeed if there is no existing file at the specified path. - err = f.Write(ctx, "/foo/qux", strings.NewReader(`hello universe`)) - assert.NoError(t, err) - - // Stat on a directory should succeed. - // Note: size and modification time behave differently between backends. - info, err := f.Stat(ctx, "/foo") - require.NoError(t, err) - assert.Equal(t, "foo", info.Name()) - assert.True(t, info.Mode().IsDir()) - assert.Equal(t, true, info.IsDir()) - - // Stat on a file should succeed. - // Note: size and modification time behave differently between backends. - info, err = f.Stat(ctx, "/foo/bar") - require.NoError(t, err) - assert.Equal(t, "bar", info.Name()) - assert.True(t, info.Mode().IsRegular()) - assert.Equal(t, false, info.IsDir()) - - // Delete should fail if the file doesn't exist. - err = f.Delete(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Stat should fail if the file doesn't exist. - _, err = f.Stat(ctx, "/doesnt_exist") - assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) - assert.True(t, errors.Is(err, fs.ErrNotExist)) - - // Delete should succeed for file that does exist. - err = f.Delete(ctx, "/foo/bar") - assert.NoError(t, err) - - // Delete should fail for a non-empty directory. - err = f.Delete(ctx, "/foo") - assert.True(t, errors.As(err, &filer.DirectoryNotEmptyError{})) - assert.True(t, errors.Is(err, fs.ErrInvalid)) - - // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. - // err = f.Delete(ctx, "/foo", filer.DeleteRecursively) - // assert.NoError(t, err) - - // Delete of the filer root should ALWAYS fail, otherwise subsequent writes would fail. - // It is not in the filer's purview to delete its root directory. - err = f.Delete(ctx, "/") - assert.True(t, errors.As(err, &filer.CannotDeleteRootError{})) - assert.True(t, errors.Is(err, fs.ErrInvalid)) + return wf } -func TestAccFilerFilesApiReadDir(t *testing.T) { - t.Skipf("no support for ReadDir yet") - ctx, f := setupFilerFilesApiTest(t) - runFilerReadDirTest(t, ctx, f) +func TestAccFilerWorkspaceFilesExtensionsRead(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Read contents of test fixtures as a sanity check. 
+ filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('first upload'))") + filerTest{t, wf}.assertContents(ctx, "bar.py", "print('foo')") + filerTest{t, wf}.assertContentsJupyter(ctx, "jupyter.ipynb") + filerTest{t, wf}.assertContents(ctx, "dir/file.txt", "file content") + filerTest{t, wf}.assertContents(ctx, "scala-notebook.scala", "// Databricks notebook source\nprintln('first upload')") + filerTest{t, wf}.assertContents(ctx, "pretender", "not a notebook") + + // Read non-existent file + _, err := wf.Read(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not read a regular file as a notebook + _, err = wf.Read(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "pretender.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Read directory + _, err = wf.Read(ctx, "dir") + assert.ErrorIs(t, err, fs.ErrInvalid) + + // Ensure we do not read a Scala notebook as a Python notebook + _, err = wf.Read(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccFilerWorkspaceFilesExtensionsDelete(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Delete notebook + err := wf.Delete(ctx, "foo.py") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "foo.py") + + // Delete file + err = wf.Delete(ctx, "bar.py") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "bar.py") + + // Delete jupyter notebook + err = wf.Delete(ctx, "jupyter.ipynb") + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "jupyter.ipynb") + + // Delete non-existent file + err = wf.Delete(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not delete a file as a notebook + err = wf.Delete(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not delete a Scala notebook as a Python notebook + _, err = wf.Read(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Delete directory + err = wf.Delete(ctx, "dir") + assert.ErrorIs(t, err, fs.ErrInvalid) + + // Delete directory recursively + err = wf.Delete(ctx, "dir", filer.DeleteRecursively) + require.NoError(t, err) + filerTest{t, wf}.assertNotExists(ctx, "dir") +} + +func TestAccFilerWorkspaceFilesExtensionsStat(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf := setupFilerWithExtensionsTest(t) + + // Stat on a notebook + info, err := wf.Stat(ctx, "foo.py") + require.NoError(t, err) + assert.Equal(t, "foo.py", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a file + info, err = wf.Stat(ctx, "bar.py") + require.NoError(t, err) + assert.Equal(t, "bar.py", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a Jupyter notebook + info, err = wf.Stat(ctx, "jupyter.ipynb") + require.NoError(t, err) + assert.Equal(t, "jupyter.ipynb", info.Name()) + assert.False(t, info.IsDir()) + + // Stat on a directory + info, err = wf.Stat(ctx, "dir") + require.NoError(t, err) + assert.Equal(t, "dir", info.Name()) + assert.True(t, info.IsDir()) + + // Stat on a non-existent file + _, err = wf.Stat(ctx, "non-existent.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not stat a file as a notebook + _, err = wf.Stat(ctx, "pretender.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Ensure we do not stat a Scala notebook as a Python notebook + _, err = wf.Stat(ctx, "scala-notebook.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + + _, err = 
wf.Stat(ctx, "pretender.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccFilerWorkspaceFilesExtensionsErrorsOnDupName(t *testing.T) { + t.Parallel() + + tcases := []struct { + files []struct{ name, content string } + name string + }{ + { + name: "python", + files: []struct{ name, content string }{ + {"foo.py", "print('foo')"}, + {"foo.py", "# Databricks notebook source\nprint('foo')"}, + }, + }, + { + name: "r", + files: []struct{ name, content string }{ + {"foo.r", "print('foo')"}, + {"foo.r", "# Databricks notebook source\nprint('foo')"}, + }, + }, + { + name: "sql", + files: []struct{ name, content string }{ + {"foo.sql", "SELECT 'foo'"}, + {"foo.sql", "-- Databricks notebook source\nSELECT 'foo'"}, + }, + }, + { + name: "scala", + files: []struct{ name, content string }{ + {"foo.scala", "println('foo')"}, + {"foo.scala", "// Databricks notebook source\nprintln('foo')"}, + }, + }, + // We don't need to test this for ipynb notebooks. The import API + // fails when the file extension is .ipynb but the content is not a + // valid jupyter notebook. + } + + for i := range tcases { + tc := tcases[i] + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, tmpDir := setupWsfsExtensionsFiler(t) + + for _, f := range tc.files { + err := wf.Write(ctx, f.name, strings.NewReader(f.content), filer.CreateParentDirectories) + require.NoError(t, err) + } + + _, err := wf.ReadDir(ctx, ".") + assert.ErrorAs(t, err, &filer.DuplicatePathError{}) + assert.ErrorContains(t, err, fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both NOTEBOOK at %s and FILE at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", path.Join(tmpDir, "foo"), path.Join(tmpDir, tc.files[0].name), tc.files[0].name)) + }) + } + +} + +func TestAccWorkspaceFilesExtensionsDirectoriesAreNotNotebooks(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Create a directory with an extension + err := wf.Mkdir(ctx, "foo") + require.NoError(t, err) + + // Reading foo.py should fail. foo is a directory, not a notebook.
+ _, err = wf.Read(ctx, "foo.py") + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccWorkspaceFilesExtensions_ExportFormatIsPreserved(t *testing.T) { + t.Parallel() + + ctx := context.Background() + wf, _ := setupWsfsExtensionsFiler(t) + + // Case 1: Source Notebook + err := wf.Write(ctx, "foo.py", strings.NewReader("# Databricks notebook source\nprint('foo')")) + require.NoError(t, err) + + // The source notebook should exist but not the Jupyter notebook + filerTest{t, wf}.assertContents(ctx, "foo.py", "# Databricks notebook source\nprint('foo')") + _, err = wf.Stat(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, "foo.ipynb") + assert.ErrorIs(t, err, fs.ErrNotExist) + + // Case 2: Jupyter Notebook + err = wf.Write(ctx, "bar.ipynb", strings.NewReader(jupyterNotebookContent1)) + require.NoError(t, err) + + // The Jupyter notebook should exist but not the source notebook + filerTest{t, wf}.assertContentsJupyter(ctx, "bar.ipynb") + _, err = wf.Stat(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + _, err = wf.Read(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) + err = wf.Delete(ctx, "bar.py") + assert.ErrorIs(t, err, fs.ErrNotExist) } diff --git a/internal/fs_cat_test.go b/internal/fs_cat_test.go index 2c979ea73..6292aef18 100644 --- a/internal/fs_cat_test.go +++ b/internal/fs_cat_test.go @@ -13,31 +13,60 @@ import ( "github.com/stretchr/testify/require" ) -func TestAccFsCatForDbfs(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsCat(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "a", "hello.txt")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "abc", stdout.String()) + stdout, stderr := RequireSuccessfulRun(t, "fs", "cat", path.Join(tmpDir, "hello.txt")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "abcd", stdout.String()) + }) + } } -func TestAccFsCatForDbfsOnNonExistentFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsCatOnADir(t *testing.T) { + t.Parallel() - _, _, err := RequireErrorRun(t, "fs", "cat", "dbfs:/non-existent-file") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "dir1") + require.NoError(t, err) + + _, _, err = RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "dir1")) + assert.ErrorAs(t, err, &filer.NotAFile{}) + }) + } +} + +func TestAccFsCatOnNonExistentFile(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "cat", path.Join(tmpDir, "non-existent-file")) + assert.ErrorIs(t, 
err, fs.ErrNotExist) + }) + } } func TestAccFsCatForDbfsInvalidScheme(t *testing.T) { @@ -65,6 +94,3 @@ func TestAccFsCatDoesNotSupportOutputModeJson(t *testing.T) { _, _, err = RequireErrorRun(t, "fs", "cat", "dbfs:"+path.Join(tmpDir, "hello.txt"), "--output=json") assert.ErrorContains(t, err, "json output not supported") } - -// TODO: Add test asserting an error when cat is called on an directory. Need this to be -// fixed in the SDK first (https://github.com/databricks/databricks-sdk-go/issues/414) diff --git a/internal/fs_cp_test.go b/internal/fs_cp_test.go index ab177a36f..b69735bc0 100644 --- a/internal/fs_cp_test.go +++ b/internal/fs_cp_test.go @@ -2,16 +2,15 @@ package internal import ( "context" - "fmt" "io" "path" "path/filepath" + "regexp" "runtime" "strings" "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -60,84 +59,124 @@ func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) { assertFileContent(t, ctx, f, "a/b/c/hello.txt", "hello, world\n") } -func setupLocalFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - tmp := t.TempDir() - f, err := filer.NewLocalClient(tmp) - require.NoError(t, err) - - return f, path.Join(filepath.ToSlash(tmp)) -} - -func setupDbfsFiler(t *testing.T) (filer.Filer, string) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - return f, path.Join("dbfs:/", tmpDir) -} - type cpTest struct { + name string setupSource func(*testing.T) (filer.Filer, string) setupTarget func(*testing.T) (filer.Filer, string) } -func setupTable() []cpTest { +func copyTests() []cpTest { return []cpTest{ - {setupSource: setupLocalFiler, setupTarget: setupLocalFiler}, - {setupSource: setupLocalFiler, setupTarget: setupDbfsFiler}, - {setupSource: setupDbfsFiler, setupTarget: setupLocalFiler}, - {setupSource: setupDbfsFiler, setupTarget: setupDbfsFiler}, + // source: local file system + { + name: "local to local", + setupSource: setupLocalFiler, + setupTarget: setupLocalFiler, + }, + { + name: "local to dbfs", + setupSource: setupLocalFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "local to uc-volumes", + setupSource: setupLocalFiler, + setupTarget: setupUcVolumesFiler, + }, + + // source: dbfs + { + name: "dbfs to local", + setupSource: setupDbfsFiler, + setupTarget: setupLocalFiler, + }, + { + name: "dbfs to dbfs", + setupSource: setupDbfsFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "dbfs to uc-volumes", + setupSource: setupDbfsFiler, + setupTarget: setupUcVolumesFiler, + }, + + // source: uc-volumes + { + name: "uc-volumes to local", + setupSource: setupUcVolumesFiler, + setupTarget: setupLocalFiler, + }, + { + name: "uc-volumes to dbfs", + setupSource: setupUcVolumesFiler, + setupTarget: setupDbfsFiler, + }, + { + name: "uc-volumes to uc-volumes", + setupSource: setupUcVolumesFiler, + setupTarget: setupUcVolumesFiler, + }, } } func TestAccFsCpDir(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - RequireSuccessfulRun(t, "fs", "cp", "-r", sourceDir, targetDir) + 
t.Run(tc.name, func(t *testing.T) { + t.Parallel() - assertTargetDir(t, ctx, targetFiler) + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + + assertTargetDir(t, context.Background(), targetFiler) + }) } } func TestAccFsCpFileToFile(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceFile(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - assertTargetFile(t, ctx, targetFiler, "bar.txt") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceFile(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt")) + + assertTargetFile(t, context.Background(), targetFiler, "bar.txt") + }) } } func TestAccFsCpFileToDir(t *testing.T) { - ctx := context.Background() - table := setupTable() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceFile(t, ctx, sourceFiler) + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + for _, testCase := range copyTests() { + tc := testCase - assertTargetFile(t, ctx, targetFiler, "foo.txt") + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceFile(t, context.Background(), sourceFiler) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir) + + assertTargetFile(t, context.Background(), targetFiler, "foo.txt") + }) } } @@ -158,125 +197,161 @@ func TestAccFsCpFileToDirForWindowsPaths(t *testing.T) { } func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten") - assertFileContent(t, ctx, targetFiler, "query.sql", "SELECT 1") - assertFileContent(t, ctx, targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive") + 
assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + assertFileContent(t, context.Background(), targetFiler, "query.sql", "SELECT 1") + assertFileContent(t, context.Background(), targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)") + }) } } func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c")) + assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "this should not be overwritten") + }) } } func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--recursive") - assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "this should not be overwritten") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/dontoverwrite.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/dontoverwrite.txt")) + assertFileContent(t, context.Background(), targetFiler, "a/b/c/dontoverwrite.txt", "this should not be overwritten") + }) } } func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting 
file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") - assertTargetDir(t, ctx, targetFiler) + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite") + assertTargetDir(t, context.Background(), targetFiler) + }) } } func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this will be overwritten. Such is life."), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--overwrite") - assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "hello, world\n") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/overwritten.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/overwritten.txt"), "--overwrite") + assertFileContent(t, context.Background(), targetFiler, "a/b/c/overwritten.txt", "hello, world\n") + }) } } func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten :') "), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--recursive", "--overwrite") - assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "hello, world\n") + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "a/b/c/hello.txt", strings.NewReader("this should be overwritten"), filer.CreateParentDirectories) + require.NoError(t, err) + + RequireSuccessfulRun(t, "fs", "cp", 
path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--overwrite") + assertFileContent(t, context.Background(), targetFiler, "a/b/c/hello.txt", "hello, world\n") + }) } } func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - _, _, err = RequireErrorRun(t, "fs", "cp", "dbfs:"+tmpDir, "dbfs:/tmp") - assert.Equal(t, fmt.Sprintf("source path %s is a directory. Please specify the --recursive flag", tmpDir), err.Error()) + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "cp", path.Join(tmpDir), path.Join(tmpDir, "foobar")) + r := regexp.MustCompile("source path .* is a directory. Please specify the --recursive flag") + assert.Regexp(t, r, err.Error()) + }) + } } func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { @@ -287,20 +362,24 @@ func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) { } func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) { - ctx := context.Background() - table := setupTable() + t.Parallel() - for _, row := range table { - sourceFiler, sourceDir := row.setupSource(t) - targetFiler, targetDir := row.setupTarget(t) - setupSourceDir(t, ctx, sourceFiler) + for _, testCase := range copyTests() { + tc := testCase - // Write a conflicting file to target - err := targetFiler.Write(ctx, "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) - require.NoError(t, err) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive", "--overwrite") - assert.Error(t, err) + sourceFiler, sourceDir := tc.setupSource(t) + targetFiler, targetDir := tc.setupTarget(t) + setupSourceDir(t, context.Background(), sourceFiler) + + // Write a conflicting file to target + err := targetFiler.Write(context.Background(), "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories) + require.NoError(t, err) + + _, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive") + assert.Error(t, err) + }) } - } diff --git a/internal/fs_ls_test.go b/internal/fs_ls_test.go index 9e02b09cc..994a4a425 100644 --- a/internal/fs_ls_test.go +++ b/internal/fs_ls_test.go @@ -11,131 +11,163 @@ import ( _ "github.com/databricks/cli/cmd/fs" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsLsForDbfs(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - err = f.Write(ctx, "bye.txt", strings.NewReader("def")) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), 
&parsedStdout) - require.NoError(t, err) - - // assert on ls output - assert.Len(t, parsedStdout, 2) - assert.Equal(t, "a", parsedStdout[0]["name"]) - assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) - assert.Equal(t, "bye.txt", parsedStdout[1]["name"]) - assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), parsedStdout[1]["size"]) +type fsTest struct { + name string + setupFiler func(t *testing.T) (filer.Filer, string) } -func TestAccFsLsForDbfsWithAbsolutePaths(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - err = f.Write(ctx, "bye.txt", strings.NewReader("def")) - require.NoError(t, err) - - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json", "--absolute") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), &parsedStdout) - require.NoError(t, err) - - // assert on ls output - assert.Len(t, parsedStdout, 2) - assert.Equal(t, path.Join("dbfs:", tmpDir, "a"), parsedStdout[0]["name"]) - assert.Equal(t, true, parsedStdout[0]["is_directory"]) - assert.Equal(t, float64(0), parsedStdout[0]["size"]) - - assert.Equal(t, path.Join("dbfs:", tmpDir, "bye.txt"), parsedStdout[1]["name"]) - assert.Equal(t, false, parsedStdout[1]["is_directory"]) - assert.Equal(t, float64(3), parsedStdout[1]["size"]) +var fsTests = []fsTest{ + { + name: "dbfs", + setupFiler: setupDbfsFiler, + }, + { + name: "uc-volumes", + setupFiler: setupUcVolumesFiler, + }, } -func TestAccFsLsForDbfsOnFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() +func setupLsFiles(t *testing.T, f filer.Filer) { + err := f.Write(context.Background(), "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) require.NoError(t, err) - - tmpDir := TemporaryDbfsDir(t, w) - - f, err := filer.NewDbfsClient(w, tmpDir) + err = f.Write(context.Background(), "bye.txt", strings.NewReader("def")) require.NoError(t, err) - - err = f.Mkdir(ctx, "a") - require.NoError(t, err) - err = f.Write(ctx, "a/hello.txt", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) - - _, _, err = RequireErrorRun(t, "fs", "ls", "dbfs:"+path.Join(tmpDir, "a", "hello.txt"), "--output=json") - assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) } -func TestAccFsLsForDbfsOnEmptyDir(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsLs(t *testing.T) { + t.Parallel() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", "dbfs:"+tmpDir, "--output=json") - assert.Equal(t, "", stderr.String()) - var parsedStdout []map[string]any - err = json.Unmarshal(stdout.Bytes(), &parsedStdout) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) - // assert on ls output - assert.Equal(t, 0, 
len(parsedStdout)) + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + assert.Equal(t, "", stderr.String()) + + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Len(t, parsedStdout, 2) + + assert.Equal(t, "a", parsedStdout[0]["name"]) + assert.Equal(t, true, parsedStdout[0]["is_directory"]) + assert.Equal(t, float64(0), parsedStdout[0]["size"]) + + assert.Equal(t, "bye.txt", parsedStdout[1]["name"]) + assert.Equal(t, false, parsedStdout[1]["is_directory"]) + assert.Equal(t, float64(3), parsedStdout[1]["size"]) + }) + } } -func TestAccFsLsForDbfsForNonexistingDir(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsLsWithAbsolutePaths(t *testing.T) { + t.Parallel() - _, _, err := RequireErrorRun(t, "fs", "ls", "dbfs:/john-cena", "--output=json") - assert.ErrorIs(t, err, fs.ErrNotExist) + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) + + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json", "--absolute") + assert.Equal(t, "", stderr.String()) + + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Len(t, parsedStdout, 2) + + assert.Equal(t, path.Join(tmpDir, "a"), parsedStdout[0]["name"]) + assert.Equal(t, true, parsedStdout[0]["is_directory"]) + assert.Equal(t, float64(0), parsedStdout[0]["size"]) + + assert.Equal(t, path.Join(tmpDir, "bye.txt"), parsedStdout[1]["name"]) + assert.Equal(t, false, parsedStdout[1]["is_directory"]) + assert.Equal(t, float64(3), parsedStdout[1]["size"]) + }) + } +} + +func TestAccFsLsOnFile(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + f, tmpDir := tc.setupFiler(t) + setupLsFiles(t, f) + + _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "a", "hello.txt"), "--output=json") + assert.Regexp(t, regexp.MustCompile("not a directory: .*/a/hello.txt"), err.Error()) + assert.ErrorAs(t, err, &filer.NotADirectory{}) + }) + } +} + +func TestAccFsLsOnEmptyDir(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + stdout, stderr := RequireSuccessfulRun(t, "fs", "ls", tmpDir, "--output=json") + assert.Equal(t, "", stderr.String()) + var parsedStdout []map[string]any + err := json.Unmarshal(stdout.Bytes(), &parsedStdout) + require.NoError(t, err) + + // assert on ls output + assert.Equal(t, 0, len(parsedStdout)) + }) + } +} + +func TestAccFsLsForNonexistingDir(t *testing.T) { + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + _, _, err := RequireErrorRun(t, "fs", "ls", path.Join(tmpDir, "nonexistent"), "--output=json") + assert.ErrorIs(t, err, fs.ErrNotExist) + assert.Regexp(t, regexp.MustCompile("no such directory: .*/nonexistent"), err.Error()) + }) + } } func TestAccFsLsWithoutScheme(t *testing.T) { + t.Parallel() + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) - _, _, err := RequireErrorRun(t, "fs", "ls", "/ray-mysterio", "--output=json") + _, _, err := RequireErrorRun(t, "fs", "ls", "/path-without-a-dbfs-scheme", "--output=json") assert.ErrorIs(t, 
err, fs.ErrNotExist) } diff --git a/internal/fs_mkdir_test.go b/internal/fs_mkdir_test.go index af0e9d187..dd75c7c32 100644 --- a/internal/fs_mkdir_test.go +++ b/internal/fs_mkdir_test.go @@ -8,110 +8,127 @@ import ( "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsMkdirCreatesDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsMkdir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create directory "a" - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a" + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // assert directory "a" is created - info, err := f.Stat(ctx, "a") - require.NoError(t, err) - assert.Equal(t, "a", info.Name()) - assert.Equal(t, true, info.IsDir()) + // assert directory "a" is created + info, err := f.Stat(context.Background(), "a") + require.NoError(t, err) + assert.Equal(t, "a", info.Name()) + assert.Equal(t, true, info.IsDir()) + }) + } } -func TestAccFsMkdirCreatesMultipleDirectories(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsMkdirCreatesIntermediateDirectories(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create directory /a/b/c - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a", "b", "c")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a/b/c" + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a", "b", "c")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // assert directory "a" is created - infoA, err := f.Stat(ctx, "a") - require.NoError(t, err) - assert.Equal(t, "a", infoA.Name()) - assert.Equal(t, true, infoA.IsDir()) + // assert directory "a" is created + infoA, err := f.Stat(context.Background(), "a") + require.NoError(t, err) + assert.Equal(t, "a", infoA.Name()) + assert.Equal(t, true, infoA.IsDir()) - // assert directory "b" is created - infoB, err := f.Stat(ctx, "a/b") - require.NoError(t, err) - assert.Equal(t, "b", infoB.Name()) - assert.Equal(t, true, infoB.IsDir()) + // assert directory "b" is created + infoB, err := f.Stat(context.Background(), "a/b") + require.NoError(t, err) + assert.Equal(t, "b", infoB.Name()) + assert.Equal(t, true, infoB.IsDir()) - // assert directory "c" is created - infoC, err := f.Stat(ctx, "a/b/c") - require.NoError(t, err) - assert.Equal(t, "c", infoC.Name()) - assert.Equal(t, true, infoC.IsDir()) + // assert directory "c" is created + infoC, err := f.Stat(context.Background(), 
"a/b/c") + require.NoError(t, err) + assert.Equal(t, "c", infoC.Name()) + assert.Equal(t, true, infoC.IsDir()) + }) + } } func TestAccFsMkdirWhenDirectoryAlreadyExists(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - // create directory "a" - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - err = f.Mkdir(ctx, "a") - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // assert run is successful without any errors - stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "a")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // create directory "a" + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) + + // assert run is successful without any errors + stdout, stderr := RequireSuccessfulRun(t, "fs", "mkdir", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) + }) + } } func TestAccFsMkdirWhenFileExistsAtPath(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + t.Run("dbfs", func(t *testing.T) { + t.Parallel() - tmpDir := TemporaryDbfsDir(t, w) + f, tmpDir := setupDbfsFiler(t) - // create file hello - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) - err = f.Write(ctx, "hello", strings.NewReader("abc")) - require.NoError(t, err) + // create file "hello" + err := f.Write(context.Background(), "hello", strings.NewReader("abc")) + require.NoError(t, err) - // assert run fails - _, _, err = RequireErrorRun(t, "fs", "mkdir", "dbfs:"+path.Join(tmpDir, "hello")) - // Different cloud providers return different errors. - regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) - assert.Regexp(t, regex, err.Error()) + // assert mkdir fails + _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + + // Different cloud providers return different errors. 
+ regex := regexp.MustCompile(`(^|: )Path is a file: .*$|(^|: )Cannot create directory .* because .* is an existing file\.$|(^|: )mkdirs\(hadoopPath: .*, permission: rwxrwxrwx\): failed$`) + assert.Regexp(t, regex, err.Error()) + }) + + t.Run("uc-volumes", func(t *testing.T) { + t.Parallel() + + f, tmpDir := setupUcVolumesFiler(t) + + // create file "hello" + err := f.Write(context.Background(), "hello", strings.NewReader("abc")) + require.NoError(t, err) + + // assert mkdir fails + _, _, err = RequireErrorRun(t, "fs", "mkdir", path.Join(tmpDir, "hello")) + + assert.ErrorAs(t, err, &filer.FileAlreadyExistsError{}) + }) } diff --git a/internal/fs_rm_test.go b/internal/fs_rm_test.go index d70827d1a..e86f5713b 100644 --- a/internal/fs_rm_test.go +++ b/internal/fs_rm_test.go @@ -8,139 +8,150 @@ import ( "testing" "github.com/databricks/cli/libs/filer" - "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func TestAccFsRmForFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmFile(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a file + f, tmpDir := tc.setupFiler(t) + err := f.Write(context.Background(), "hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // create file to delete - err = f.Write(ctx, "hello.txt", strings.NewReader("abc")) - require.NoError(t, err) + // Check file was created + _, err = f.Stat(context.Background(), "hello.txt") + assert.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "hello.txt") - require.NoError(t, err) - require.Equal(t, "hello.txt", info.Name()) - require.Equal(t, info.IsDir(), false) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "hello.txt")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "hello.txt")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) - - // assert file was deleted - _, err = f.Stat(ctx, "hello.txt") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Assert file was deleted + _, err = f.Stat(context.Background(), "hello.txt") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } -func TestAccFsRmForEmptyDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmEmptyDir(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a directory + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // create directory to delete - err = f.Mkdir(ctx, "avacado") - require.NoError(t, err) + // Check directory was created + _, err = f.Stat(context.Background(), "a") + assert.NoError(t, err) - // check directory was created - info, err := f.Stat(ctx, "avacado") - require.NoError(t, err) - require.Equal(t, "avacado", info.Name()) 
- require.Equal(t, info.IsDir(), true) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a")) + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado")) - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) - - // assert directory was deleted - _, err = f.Stat(ctx, "avacado") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Assert directory was deleted + _, err = f.Stat(context.Background(), "a") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } -func TestAccFsRmForNonEmptyDirectory(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmNonEmptyDirectory(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + // Create a directory + f, tmpDir := tc.setupFiler(t) + err := f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // create file in dir - err = f.Write(ctx, "avacado/guacamole", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) + // Create a file in the directory + err = f.Write(context.Background(), "a/hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "avacado/guacamole") - require.NoError(t, err) - require.Equal(t, "guacamole", info.Name()) - require.Equal(t, info.IsDir(), false) + // Check file was created + _, err = f.Stat(context.Background(), "a/hello.txt") + assert.NoError(t, err) - // Run rm command - _, _, err = RequireErrorRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado")) - assert.ErrorIs(t, err, fs.ErrInvalid) - assert.ErrorContains(t, err, "directory not empty") + // Run rm command + _, _, err = RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "a")) + assert.ErrorIs(t, err, fs.ErrInvalid) + assert.ErrorAs(t, err, &filer.DirectoryNotEmptyError{}) + }) + } } func TestAccFsRmForNonExistentFile(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + t.Parallel() + + for _, testCase := range fsTests { + tc := testCase + + t.Run(tc.name, func(t *testing.T) { + t.Parallel() + + _, tmpDir := tc.setupFiler(t) + + // Expect error if file does not exist + _, _, err := RequireErrorRun(t, "fs", "rm", path.Join(tmpDir, "does-not-exist")) + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } - // Expect error if file does not exist - _, _, err := RequireErrorRun(t, "fs", "rm", "dbfs:/does-not-exist") - assert.ErrorIs(t, err, fs.ErrNotExist) } -func TestAccFsRmForNonEmptyDirectoryWithRecursiveFlag(t *testing.T) { - t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) +func TestAccFsRmDirRecursively(t *testing.T) { + t.Parallel() - ctx := context.Background() - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) + for _, testCase := range fsTests { + tc := testCase - tmpDir := TemporaryDbfsDir(t, w) + t.Run(tc.name, func(t *testing.T) { + t.Parallel() - f, err := filer.NewDbfsClient(w, tmpDir) - require.NoError(t, err) + f, tmpDir := tc.setupFiler(t) - // create file in dir - err = f.Write(ctx, "avacado/guacamole", strings.NewReader("abc"), filer.CreateParentDirectories) - require.NoError(t, err) + // Create a directory + err := 
f.Mkdir(context.Background(), "a") + require.NoError(t, err) - // check file was created - info, err := f.Stat(ctx, "avacado/guacamole") - require.NoError(t, err) - require.Equal(t, "guacamole", info.Name()) - require.Equal(t, info.IsDir(), false) + // Create a file in the directory + err = f.Write(context.Background(), "a/hello.txt", strings.NewReader("abcd"), filer.CreateParentDirectories) + require.NoError(t, err) - // Run rm command - stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", "dbfs:"+path.Join(tmpDir, "avacado"), "--recursive") - assert.Equal(t, "", stderr.String()) - assert.Equal(t, "", stdout.String()) + // Check file was created + _, err = f.Stat(context.Background(), "a/hello.txt") + assert.NoError(t, err) - // assert directory was deleted - _, err = f.Stat(ctx, "avacado") - assert.ErrorIs(t, err, fs.ErrNotExist) + // Run rm command + stdout, stderr := RequireSuccessfulRun(t, "fs", "rm", path.Join(tmpDir, "a"), "--recursive") + assert.Equal(t, "", stderr.String()) + assert.Equal(t, "", stdout.String()) + + // Assert directory was deleted + _, err = f.Stat(context.Background(), "a") + assert.ErrorIs(t, err, fs.ErrNotExist) + }) + } } diff --git a/internal/helpers.go b/internal/helpers.go index 22e38e211..972a2322b 100644 --- a/internal/helpers.go +++ b/internal/helpers.go @@ -5,10 +5,13 @@ import ( "bytes" "context" "encoding/json" + "errors" "fmt" "io" "math/rand" + "net/http" "os" + "path" "path/filepath" "reflect" "strings" @@ -16,11 +19,16 @@ import ( "testing" "time" + "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/cli/cmd" _ "github.com/databricks/cli/cmd/version" "github.com/databricks/cli/libs/cmdio" + "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/service/compute" "github.com/databricks/databricks-sdk-go/service/files" "github.com/databricks/databricks-sdk-go/service/jobs" @@ -100,7 +108,12 @@ func (t *cobraTestRunner) registerFlagCleanup(c *cobra.Command) { // Find target command that will be run. Example: if the command run is `databricks fs cp`, // target command corresponds to `cp` targetCmd, _, err := c.Find(t.args) - require.NoError(t, err) + if err != nil && strings.HasPrefix(err.Error(), "unknown command") { + // even if command is unknown, we can proceed + require.NotNil(t, targetCmd) + } else { + require.NoError(t, err) + } // Force initialization of default flags. 
// These are initialized by cobra at execution time and would otherwise @@ -131,6 +144,14 @@ func (t *cobraTestRunner) WaitForTextPrinted(text string, timeout time.Duration) }, timeout, 50*time.Millisecond) } +func (t *cobraTestRunner) WaitForOutput(text string, timeout time.Duration) { + require.Eventually(t.T, func() bool { + currentStdout := t.stdout.String() + currentErrout := t.stderr.String() + return strings.Contains(currentStdout, text) || strings.Contains(currentErrout, text) + }, timeout, 50*time.Millisecond) +} + func (t *cobraTestRunner) WithStdin() { reader, writer := io.Pipe() t.stdinR = reader @@ -156,22 +177,28 @@ func (t *cobraTestRunner) RunBackground() { var stdoutW, stderrW io.WriteCloser stdoutR, stdoutW = io.Pipe() stderrR, stderrW = io.Pipe() - root := cmd.New(t.ctx) - root.SetOut(stdoutW) - root.SetErr(stderrW) - root.SetArgs(t.args) + ctx := cmdio.NewContext(t.ctx, &cmdio.Logger{ + Mode: flags.ModeAppend, + Reader: bufio.Reader{}, + Writer: stderrW, + }) + + cli := cmd.New(ctx) + cli.SetOut(stdoutW) + cli.SetErr(stderrW) + cli.SetArgs(t.args) if t.stdinW != nil { - root.SetIn(t.stdinR) + cli.SetIn(t.stdinR) } // Register cleanup function to restore flags to their original values // once test has been executed. This is needed because flag values reside // in a global singleton data-structure, and thus subsequent tests might // otherwise interfere with each other - t.registerFlagCleanup(root) + t.registerFlagCleanup(cli) errch := make(chan error) - ctx, cancel := context.WithCancel(t.ctx) + ctx, cancel := context.WithCancel(ctx) // Tee stdout/stderr to buffers. stdoutR = io.TeeReader(stdoutR, &t.stdout) @@ -184,7 +211,7 @@ func (t *cobraTestRunner) RunBackground() { // Run command in background. go func() { - cmd, err := root.ExecuteContextC(ctx) + err := root.Execute(ctx, cli) if err != nil { t.Logf("Error running command: %s", err) } @@ -217,7 +244,7 @@ func (t *cobraTestRunner) RunBackground() { // These commands are globals so we have to clean up to the best of our ability after each run. // See https://github.com/spf13/cobra/blob/a6f198b635c4b18fff81930c40d464904e55b161/command.go#L1062-L1066 //lint:ignore SA1012 cobra sets the context and doesn't clear it - cmd.SetContext(nil) + cli.SetContext(nil) // Make caller aware of error. errch <- err @@ -444,6 +471,40 @@ func TemporaryDbfsDir(t *testing.T, w *databricks.WorkspaceClient) string { return path } +// Create a new UC volume in a catalog called "main" in the workspace. 
+func TemporaryUcVolume(t *testing.T, w *databricks.WorkspaceClient) string { + ctx := context.Background() + + // Create a schema + schema, err := w.Schemas.Create(ctx, catalog.CreateSchema{ + CatalogName: "main", + Name: RandomName("test-schema-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + w.Schemas.Delete(ctx, catalog.DeleteSchemaRequest{ + FullName: schema.FullName, + }) + }) + + // Create a volume + volume, err := w.Volumes.Create(ctx, catalog.CreateVolumeRequestContent{ + CatalogName: "main", + SchemaName: schema.Name, + Name: "my-volume", + VolumeType: catalog.VolumeTypeManaged, + }) + require.NoError(t, err) + t.Cleanup(func() { + w.Volumes.Delete(ctx, catalog.DeleteVolumeRequest{ + Name: volume.FullName, + }) + }) + + return path.Join("/Volumes", "main", schema.Name, volume.Name) + +} + func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { ctx := context.Background() me, err := w.CurrentUser.Me(ctx) @@ -476,8 +537,79 @@ func TemporaryRepo(t *testing.T, w *databricks.WorkspaceClient) string { func GetNodeTypeId(env string) string { if env == "gcp" { return "n1-standard-4" - } else if env == "aws" { + } else if env == "aws" || env == "ucws" { + // aws-prod-ucws has CLOUD_ENV set to "ucws" return "i3.xlarge" } return "Standard_DS4_v2" } + +func setupLocalFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + tmp := t.TempDir() + f, err := filer.NewLocalClient(tmp) + require.NoError(t, err) + + return f, path.Join(filepath.ToSlash(tmp)) +} + +func setupWsfsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + tmpdir := TemporaryWorkspaceDir(t, w) + f, err := filer.NewWorkspaceFilesClient(w, tmpdir) + require.NoError(t, err) + + // Check if we can use this API here, skip test if we cannot. 
+ _, err = f.Read(ctx, "we_use_this_call_to_test_if_this_api_is_enabled") + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusBadRequest { + t.Skip(aerr.Message) + } + + return f, tmpdir +} + +func setupWsfsExtensionsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + w := databricks.Must(databricks.NewWorkspaceClient()) + tmpdir := TemporaryWorkspaceDir(t, w) + f, err := filer.NewWorkspaceFilesExtensionsClient(w, tmpdir) + require.NoError(t, err) + + return f, tmpdir +} + +func setupDbfsFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + tmpDir := TemporaryDbfsDir(t, w) + f, err := filer.NewDbfsClient(w, tmpDir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpDir) +} + +func setupUcVolumesFiler(t *testing.T) (filer.Filer, string) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + if os.Getenv("TEST_METASTORE_ID") == "" { + t.Skip("Skipping tests that require a UC Volume when metastore id is not set.") + } + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + tmpDir := TemporaryUcVolume(t, w) + f, err := filer.NewFilesClient(w, tmpDir) + require.NoError(t, err) + + return f, path.Join("dbfs:/", tmpDir) +} diff --git a/internal/init_test.go b/internal/init_test.go index c4c3d6d84..c3cb0127e 100644 --- a/internal/init_test.go +++ b/internal/init_test.go @@ -2,11 +2,16 @@ package internal import ( "context" + "encoding/json" + "fmt" "os" "path/filepath" "strconv" + "strings" "testing" + "github.com/databricks/cli/bundle/config" + "github.com/databricks/cli/internal/testutil" "github.com/databricks/cli/libs/auth" "github.com/databricks/databricks-sdk-go" "github.com/stretchr/testify/assert" @@ -21,6 +26,80 @@ func TestAccBundleInitErrorOnUnknownFields(t *testing.T) { assert.EqualError(t, err, "failed to compute file content for bar.tmpl. variable \"does_not_exist\" not defined") } +// This test tests the MLOps Stacks DAB e2e and thus there's a couple of special +// considerations to take note of: +// +// 1. Upstream changes to the MLOps Stacks DAB can cause this test to fail. +// In which case we should do one of: +// (a) Update this test to reflect the changes +// (b) Update the MLOps Stacks DAB to not break this test. Skip this test +// temporarily until the MLOps Stacks DAB is updated +// +// 2. While rare and to be avoided if possible, the CLI reserves the right to +// make changes that can break the MLOps Stacks DAB. In which case we should +// skip this test until the MLOps Stacks DAB is updated to work again. 
+func TestAccBundleInitOnMlopsStacks(t *testing.T) { + t.Parallel() + env := testutil.GetCloud(t).String() + + tmpDir1 := t.TempDir() + tmpDir2 := t.TempDir() + + w, err := databricks.NewWorkspaceClient(&databricks.Config{}) + require.NoError(t, err) + + projectName := RandomName("project_name_") + + // Create a config file with the project name and root dir + initConfig := map[string]string{ + "input_project_name": projectName, + "input_root_dir": "repo_name", + "input_include_models_in_unity_catalog": "no", + "input_cloud": strings.ToLower(env), + } + b, err := json.Marshal(initConfig) + require.NoError(t, err) + os.WriteFile(filepath.Join(tmpDir1, "config.json"), b, 0644) + + // Run bundle init + assert.NoFileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + RequireSuccessfulRun(t, "bundle", "init", "mlops-stacks", "--output-dir", tmpDir2, "--config-file", filepath.Join(tmpDir1, "config.json")) + + // Assert that the README.md file was created + assert.FileExists(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md")) + assertLocalFileContents(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"), fmt.Sprintf("# %s", projectName)) + + // Validate the stack + testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName)) + RequireSuccessfulRun(t, "bundle", "validate") + + // Deploy the stack + RequireSuccessfulRun(t, "bundle", "deploy") + t.Cleanup(func() { + // Delete the stack + RequireSuccessfulRun(t, "bundle", "destroy", "--auto-approve") + }) + + // Get summary of the bundle deployment + stdout, _ := RequireSuccessfulRun(t, "bundle", "summary", "--output", "json") + summary := &config.Root{} + err = json.Unmarshal(stdout.Bytes(), summary) + require.NoError(t, err) + + // Assert resource Ids are not empty + assert.NotEmpty(t, summary.Resources.Experiments["experiment"].ID) + assert.NotEmpty(t, summary.Resources.Models["model"].ID) + assert.NotEmpty(t, summary.Resources.Jobs["batch_inference_job"].ID) + assert.NotEmpty(t, summary.Resources.Jobs["model_training_job"].ID) + + // Assert the batch inference job actually exists + batchJobId, err := strconv.ParseInt(summary.Resources.Jobs["batch_inference_job"].ID, 10, 64) + require.NoError(t, err) + job, err := w.Jobs.GetByJobId(context.Background(), batchJobId) + assert.NoError(t, err) + assert.Equal(t, fmt.Sprintf("dev-%s-batch-inference-job", projectName), job.Settings.Name) +} + func TestAccBundleInitHelpers(t *testing.T) { env := GetEnvOrSkipTest(t, "CLOUD_ENV") t.Log(env) @@ -59,7 +138,7 @@ func TestAccBundleInitHelpers(t *testing.T) { }, { funcName: "{{is_service_principal}}", - expected: strconv.FormatBool(auth.IsServicePrincipal(me.Id)), + expected: strconv.FormatBool(auth.IsServicePrincipal(me.UserName)), }, { funcName: "{{smallest_node_type}}", diff --git a/internal/mocks/README.md b/internal/mocks/README.md index 231bbfaa4..70ce54546 100644 --- a/internal/mocks/README.md +++ b/internal/mocks/README.md @@ -4,4 +4,7 @@ Use this directory to store mocks for interfaces in this repository. Please use the same package structure for the mocks as the interface it is mocking. -See https://github.com/uber-go/mock for more information on how to generate mocks. 
+Refresh mocks by running: +``` +go run github.com/vektra/mockery/v2@b9df18e0f7b94f0bc11af3f379c8a9aea1e1e8da +``` diff --git a/internal/mocks/libs/filer/filer_mock.go b/internal/mocks/libs/filer/filer_mock.go deleted file mode 100644 index ef00976a2..000000000 --- a/internal/mocks/libs/filer/filer_mock.go +++ /dev/null @@ -1,139 +0,0 @@ -// Code generated by MockGen. DO NOT EDIT. -// Source: github.com/databricks/cli/libs/filer (interfaces: Filer) -// -// Generated by this command: -// -// mockgen -destination filer_mock.go github.com/databricks/cli/libs/filer Filer -// -// Package mock_filer is a generated GoMock package. -package mock_filer - -import ( - context "context" - io "io" - fs "io/fs" - reflect "reflect" - - filer "github.com/databricks/cli/libs/filer" - gomock "go.uber.org/mock/gomock" -) - -// MockFiler is a mock of Filer interface. -type MockFiler struct { - ctrl *gomock.Controller - recorder *MockFilerMockRecorder -} - -// MockFilerMockRecorder is the mock recorder for MockFiler. -type MockFilerMockRecorder struct { - mock *MockFiler -} - -// NewMockFiler creates a new mock instance. -func NewMockFiler(ctrl *gomock.Controller) *MockFiler { - mock := &MockFiler{ctrl: ctrl} - mock.recorder = &MockFilerMockRecorder{mock} - return mock -} - -// EXPECT returns an object that allows the caller to indicate expected use. -func (m *MockFiler) EXPECT() *MockFilerMockRecorder { - return m.recorder -} - -// Delete mocks base method. -func (m *MockFiler) Delete(arg0 context.Context, arg1 string, arg2 ...filer.DeleteMode) error { - m.ctrl.T.Helper() - varargs := []any{arg0, arg1} - for _, a := range arg2 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Delete", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// Delete indicates an expected call of Delete. -func (mr *MockFilerMockRecorder) Delete(arg0, arg1 any, arg2 ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1}, arg2...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Delete", reflect.TypeOf((*MockFiler)(nil).Delete), varargs...) -} - -// Mkdir mocks base method. -func (m *MockFiler) Mkdir(arg0 context.Context, arg1 string) error { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Mkdir", arg0, arg1) - ret0, _ := ret[0].(error) - return ret0 -} - -// Mkdir indicates an expected call of Mkdir. -func (mr *MockFilerMockRecorder) Mkdir(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Mkdir", reflect.TypeOf((*MockFiler)(nil).Mkdir), arg0, arg1) -} - -// Read mocks base method. -func (m *MockFiler) Read(arg0 context.Context, arg1 string) (io.ReadCloser, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Read", arg0, arg1) - ret0, _ := ret[0].(io.ReadCloser) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Read indicates an expected call of Read. -func (mr *MockFilerMockRecorder) Read(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Read", reflect.TypeOf((*MockFiler)(nil).Read), arg0, arg1) -} - -// ReadDir mocks base method. -func (m *MockFiler) ReadDir(arg0 context.Context, arg1 string) ([]fs.DirEntry, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ReadDir", arg0, arg1) - ret0, _ := ret[0].([]fs.DirEntry) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// ReadDir indicates an expected call of ReadDir. 
-func (mr *MockFilerMockRecorder) ReadDir(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ReadDir", reflect.TypeOf((*MockFiler)(nil).ReadDir), arg0, arg1) -} - -// Stat mocks base method. -func (m *MockFiler) Stat(arg0 context.Context, arg1 string) (fs.FileInfo, error) { - m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "Stat", arg0, arg1) - ret0, _ := ret[0].(fs.FileInfo) - ret1, _ := ret[1].(error) - return ret0, ret1 -} - -// Stat indicates an expected call of Stat. -func (mr *MockFilerMockRecorder) Stat(arg0, arg1 any) *gomock.Call { - mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Stat", reflect.TypeOf((*MockFiler)(nil).Stat), arg0, arg1) -} - -// Write mocks base method. -func (m *MockFiler) Write(arg0 context.Context, arg1 string, arg2 io.Reader, arg3 ...filer.WriteMode) error { - m.ctrl.T.Helper() - varargs := []any{arg0, arg1, arg2} - for _, a := range arg3 { - varargs = append(varargs, a) - } - ret := m.ctrl.Call(m, "Write", varargs...) - ret0, _ := ret[0].(error) - return ret0 -} - -// Write indicates an expected call of Write. -func (mr *MockFilerMockRecorder) Write(arg0, arg1, arg2 any, arg3 ...any) *gomock.Call { - mr.mock.ctrl.T.Helper() - varargs := append([]any{arg0, arg1, arg2}, arg3...) - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "Write", reflect.TypeOf((*MockFiler)(nil).Write), varargs...) -} diff --git a/internal/mocks/libs/filer/mock_filer.go b/internal/mocks/libs/filer/mock_filer.go new file mode 100644 index 000000000..d0d58cbda --- /dev/null +++ b/internal/mocks/libs/filer/mock_filer.go @@ -0,0 +1,390 @@ +// Code generated by mockery v2.39.1. DO NOT EDIT. + +package mockfiler + +import ( + context "context" + fs "io/fs" + + filer "github.com/databricks/cli/libs/filer" + + io "io" + + mock "github.com/stretchr/testify/mock" +) + +// MockFiler is an autogenerated mock type for the Filer type +type MockFiler struct { + mock.Mock +} + +type MockFiler_Expecter struct { + mock *mock.Mock +} + +func (_m *MockFiler) EXPECT() *MockFiler_Expecter { + return &MockFiler_Expecter{mock: &_m.Mock} +} + +// Delete provides a mock function with given fields: ctx, path, mode +func (_m *MockFiler) Delete(ctx context.Context, path string, mode ...filer.DeleteMode) error { + _va := make([]interface{}, len(mode)) + for _i := range mode { + _va[_i] = mode[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, path) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, ...filer.DeleteMode) error); ok { + r0 = rf(ctx, path, mode...) 
+ } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockFiler_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - path string +// - mode ...filer.DeleteMode +func (_e *MockFiler_Expecter) Delete(ctx interface{}, path interface{}, mode ...interface{}) *MockFiler_Delete_Call { + return &MockFiler_Delete_Call{Call: _e.mock.On("Delete", + append([]interface{}{ctx, path}, mode...)...)} +} + +func (_c *MockFiler_Delete_Call) Run(run func(ctx context.Context, path string, mode ...filer.DeleteMode)) *MockFiler_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]filer.DeleteMode, len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(filer.DeleteMode) + } + } + run(args[0].(context.Context), args[1].(string), variadicArgs...) + }) + return _c +} + +func (_c *MockFiler_Delete_Call) Return(_a0 error) *MockFiler_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Delete_Call) RunAndReturn(run func(context.Context, string, ...filer.DeleteMode) error) *MockFiler_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Mkdir provides a mock function with given fields: ctx, path +func (_m *MockFiler) Mkdir(ctx context.Context, path string) error { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value specified for Mkdir") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, path) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Mkdir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Mkdir' +type MockFiler_Mkdir_Call struct { + *mock.Call +} + +// Mkdir is a helper method to define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) Mkdir(ctx interface{}, path interface{}) *MockFiler_Mkdir_Call { + return &MockFiler_Mkdir_Call{Call: _e.mock.On("Mkdir", ctx, path)} +} + +func (_c *MockFiler_Mkdir_Call) Run(run func(ctx context.Context, path string)) *MockFiler_Mkdir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Mkdir_Call) Return(_a0 error) *MockFiler_Mkdir_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Mkdir_Call) RunAndReturn(run func(context.Context, string) error) *MockFiler_Mkdir_Call { + _c.Call.Return(run) + return _c +} + +// Read provides a mock function with given fields: ctx, path +func (_m *MockFiler) Read(ctx context.Context, path string) (io.ReadCloser, error) { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value specified for Read") + } + + var r0 io.ReadCloser + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (io.ReadCloser, error)); ok { + return rf(ctx, path) + } + if rf, ok := ret.Get(0).(func(context.Context, string) io.ReadCloser); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(io.ReadCloser) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_Read_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Read' +type MockFiler_Read_Call struct { + *mock.Call +} + +// Read is a helper method to 
define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) Read(ctx interface{}, path interface{}) *MockFiler_Read_Call { + return &MockFiler_Read_Call{Call: _e.mock.On("Read", ctx, path)} +} + +func (_c *MockFiler_Read_Call) Run(run func(ctx context.Context, path string)) *MockFiler_Read_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Read_Call) Return(_a0 io.ReadCloser, _a1 error) *MockFiler_Read_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_Read_Call) RunAndReturn(run func(context.Context, string) (io.ReadCloser, error)) *MockFiler_Read_Call { + _c.Call.Return(run) + return _c +} + +// ReadDir provides a mock function with given fields: ctx, path +func (_m *MockFiler) ReadDir(ctx context.Context, path string) ([]fs.DirEntry, error) { + ret := _m.Called(ctx, path) + + if len(ret) == 0 { + panic("no return value specified for ReadDir") + } + + var r0 []fs.DirEntry + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) ([]fs.DirEntry, error)); ok { + return rf(ctx, path) + } + if rf, ok := ret.Get(0).(func(context.Context, string) []fs.DirEntry); ok { + r0 = rf(ctx, path) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]fs.DirEntry) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, path) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_ReadDir_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ReadDir' +type MockFiler_ReadDir_Call struct { + *mock.Call +} + +// ReadDir is a helper method to define mock.On call +// - ctx context.Context +// - path string +func (_e *MockFiler_Expecter) ReadDir(ctx interface{}, path interface{}) *MockFiler_ReadDir_Call { + return &MockFiler_ReadDir_Call{Call: _e.mock.On("ReadDir", ctx, path)} +} + +func (_c *MockFiler_ReadDir_Call) Run(run func(ctx context.Context, path string)) *MockFiler_ReadDir_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_ReadDir_Call) Return(_a0 []fs.DirEntry, _a1 error) *MockFiler_ReadDir_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_ReadDir_Call) RunAndReturn(run func(context.Context, string) ([]fs.DirEntry, error)) *MockFiler_ReadDir_Call { + _c.Call.Return(run) + return _c +} + +// Stat provides a mock function with given fields: ctx, name +func (_m *MockFiler) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for Stat") + } + + var r0 fs.FileInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (fs.FileInfo, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) fs.FileInfo); ok { + r0 = rf(ctx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(fs.FileInfo) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockFiler_Stat_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Stat' +type MockFiler_Stat_Call struct { + *mock.Call +} + +// Stat is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *MockFiler_Expecter) Stat(ctx interface{}, name interface{}) 
*MockFiler_Stat_Call { + return &MockFiler_Stat_Call{Call: _e.mock.On("Stat", ctx, name)} +} + +func (_c *MockFiler_Stat_Call) Run(run func(ctx context.Context, name string)) *MockFiler_Stat_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockFiler_Stat_Call) Return(_a0 fs.FileInfo, _a1 error) *MockFiler_Stat_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockFiler_Stat_Call) RunAndReturn(run func(context.Context, string) (fs.FileInfo, error)) *MockFiler_Stat_Call { + _c.Call.Return(run) + return _c +} + +// Write provides a mock function with given fields: ctx, path, reader, mode +func (_m *MockFiler) Write(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode) error { + _va := make([]interface{}, len(mode)) + for _i := range mode { + _va[_i] = mode[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, path, reader) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for Write") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string, io.Reader, ...filer.WriteMode) error); ok { + r0 = rf(ctx, path, reader, mode...) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockFiler_Write_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Write' +type MockFiler_Write_Call struct { + *mock.Call +} + +// Write is a helper method to define mock.On call +// - ctx context.Context +// - path string +// - reader io.Reader +// - mode ...filer.WriteMode +func (_e *MockFiler_Expecter) Write(ctx interface{}, path interface{}, reader interface{}, mode ...interface{}) *MockFiler_Write_Call { + return &MockFiler_Write_Call{Call: _e.mock.On("Write", + append([]interface{}{ctx, path, reader}, mode...)...)} +} + +func (_c *MockFiler_Write_Call) Run(run func(ctx context.Context, path string, reader io.Reader, mode ...filer.WriteMode)) *MockFiler_Write_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]filer.WriteMode, len(args)-3) + for i, a := range args[3:] { + if a != nil { + variadicArgs[i] = a.(filer.WriteMode) + } + } + run(args[0].(context.Context), args[1].(string), args[2].(io.Reader), variadicArgs...) + }) + return _c +} + +func (_c *MockFiler_Write_Call) Return(_a0 error) *MockFiler_Write_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockFiler_Write_Call) RunAndReturn(run func(context.Context, string, io.Reader, ...filer.WriteMode) error) *MockFiler_Write_Call { + _c.Call.Return(run) + return _c +} + +// NewMockFiler creates a new instance of MockFiler. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
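As an aside, a minimal sketch of how a test might drive the mockery-generated MockFiler defined in this file. The test name, file path, and canned contents are illustrative; the EXPECT/Return helpers are the ones generated above, and NewMockFiler registers a cleanup that asserts all expectations were met.

```go
package internal_test

import (
	"context"
	"io"
	"strings"
	"testing"

	mockfiler "github.com/databricks/cli/internal/mocks/libs/filer"
	"github.com/stretchr/testify/mock"
	"github.com/stretchr/testify/require"
)

func TestReadWithMockFiler(t *testing.T) {
	// The constructor wires the mock to *testing.T and asserts expectations on cleanup.
	m := mockfiler.NewMockFiler(t)

	// Expect a single Read call for "foo.txt" (any context) and return canned contents.
	m.EXPECT().
		Read(mock.Anything, "foo.txt").
		Return(io.NopCloser(strings.NewReader("hello")), nil)

	r, err := m.Read(context.Background(), "foo.txt")
	require.NoError(t, err)

	b, err := io.ReadAll(r)
	require.NoError(t, err)
	require.Equal(t, "hello", string(b))
}
```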
+func NewMockFiler(t interface { + mock.TestingT + Cleanup(func()) +}) *MockFiler { + mock := &MockFiler{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/repos_test.go b/internal/repos_test.go index 340de3347..de0d926ad 100644 --- a/internal/repos_test.go +++ b/internal/repos_test.go @@ -43,7 +43,7 @@ func createTemporaryRepo(t *testing.T, w *databricks.WorkspaceClient, ctx contex return repoInfo.Id, repoPath } -func TestReposCreateWithProvider(t *testing.T) { +func TestAccReposCreateWithProvider(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -60,7 +60,7 @@ func TestReposCreateWithProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestReposCreateWithoutProvider(t *testing.T) { +func TestAccReposCreateWithoutProvider(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -77,7 +77,7 @@ func TestReposCreateWithoutProvider(t *testing.T) { assert.Equal(t, workspace.ObjectTypeRepo, oi.ObjectType) } -func TestReposGet(t *testing.T) { +func TestAccReposGet(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -106,7 +106,7 @@ func TestReposGet(t *testing.T) { assert.ErrorContains(t, err, "is not a repo") } -func TestReposUpdate(t *testing.T) { +func TestAccReposUpdate(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -127,7 +127,7 @@ func TestReposUpdate(t *testing.T) { assert.Equal(t, byIdOutput.String(), byPathOutput.String()) } -func TestReposDeleteByID(t *testing.T) { +func TestAccReposDeleteByID(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() @@ -146,7 +146,7 @@ func TestReposDeleteByID(t *testing.T) { assert.True(t, apierr.IsMissing(err), err) } -func TestReposDeleteByPath(t *testing.T) { +func TestAccReposDeleteByPath(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() diff --git a/internal/secrets_test.go b/internal/secrets_test.go index b030071bb..59e5d6150 100644 --- a/internal/secrets_test.go +++ b/internal/secrets_test.go @@ -14,7 +14,7 @@ import ( func TestSecretsCreateScopeErrWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "secrets", "create-scope") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func temporarySecretScope(ctx context.Context, t *acc.WorkspaceT) string { @@ -61,7 +61,7 @@ func assertSecretBytesValue(t *acc.WorkspaceT, scope, key string, expected []byt assert.Equal(t, expected, decoded) } -func TestSecretsPutSecretStringValue(tt *testing.T) { +func TestAccSecretsPutSecretStringValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" @@ -75,7 +75,7 @@ func TestSecretsPutSecretStringValue(tt *testing.T) { assertSecretBytesValue(t, scope, key, []byte(value)) } -func TestSecretsPutSecretBytesValue(tt *testing.T) { +func TestAccSecretsPutSecretBytesValue(tt *testing.T) { ctx, t := acc.WorkspaceTest(tt) scope := temporarySecretScope(ctx, t) key := "test-key" diff --git a/internal/storage_credentials_test.go b/internal/storage_credentials_test.go index 250ad3399..07c21861f 100644 --- a/internal/storage_credentials_test.go +++ b/internal/storage_credentials_test.go @@ -7,8 +7,12 @@ import ( "github.com/stretchr/testify/assert" ) -func TestStorageCredentialsListRendersResponse(t *testing.T) { +func 
TestAccStorageCredentialsListRendersResponse(t *testing.T) { _, _ = acc.WorkspaceTest(t) + + // Check if metastore is assigned for the workspace, otherwise test will fail + t.Log(GetEnvOrSkipTest(t, "TEST_METASTORE_ID")) + stdout, stderr := RequireSuccessfulRun(t, "storage-credentials", "list") assert.NotEmpty(t, stdout) assert.Empty(t, stderr) diff --git a/internal/sync_test.go b/internal/sync_test.go index f970a7ce0..4021e6490 100644 --- a/internal/sync_test.go +++ b/internal/sync_test.go @@ -313,7 +313,7 @@ func TestAccSyncNestedFolderSync(t *testing.T) { assertSync.remoteDirContent(ctx, "dir1", []string{"dir2"}) assertSync.remoteDirContent(ctx, "dir1/dir2", []string{"dir3"}) assertSync.remoteDirContent(ctx, "dir1/dir2/dir3", []string{"foo.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("dir1/dir2/dir3/foo.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "dir1/dir2/dir3/foo.txt")) // delete f.Remove(t) @@ -374,7 +374,7 @@ func TestAccSyncNestedSpacePlusAndHashAreEscapedSync(t *testing.T) { assertSync.remoteDirContent(ctx, "dir1", []string{"a b+c"}) assertSync.remoteDirContent(ctx, "dir1/a b+c", []string{"c+d e"}) assertSync.remoteDirContent(ctx, "dir1/a b+c/c+d e", []string{"e+f g#i.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("dir1/a b+c/c+d e/e+f g#i.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "dir1/a b+c/c+d e/e+f g#i.txt")) // delete f.Remove(t) @@ -404,7 +404,7 @@ func TestAccSyncIncrementalFileOverwritesFolder(t *testing.T) { assertSync.waitForCompletionMarker() assertSync.remoteDirContent(ctx, "", append(repoFiles, ".gitignore", "foo")) assertSync.remoteDirContent(ctx, "foo", []string{"bar.txt"}) - assertSync.snapshotContains(append(repoFiles, ".gitignore", filepath.FromSlash("foo/bar.txt"))) + assertSync.snapshotContains(append(repoFiles, ".gitignore", "foo/bar.txt")) // delete foo/bar.txt f.Remove(t) diff --git a/internal/testutil/cloud.go b/internal/testutil/cloud.go index 50bbf67f2..e547069f3 100644 --- a/internal/testutil/cloud.go +++ b/internal/testutil/cloud.go @@ -41,6 +41,9 @@ func GetCloud(t *testing.T) Cloud { return Azure case "gcp": return GCP + // CLOUD_ENV is set to "ucws" in the "aws-prod-ucws" test environment + case "ucws": + return AWS default: t.Fatalf("Unknown cloud environment: %s", env) } diff --git a/internal/testutil/env.go b/internal/testutil/env.go index 39201c5b4..e1973ba82 100644 --- a/internal/testutil/env.go +++ b/internal/testutil/env.go @@ -2,9 +2,12 @@ package testutil import ( "os" + "path/filepath" "runtime" "strings" "testing" + + "github.com/stretchr/testify/require" ) // CleanupEnvironment sets up a pristine environment containing only $PATH and $HOME. @@ -44,3 +47,23 @@ func GetEnvOrSkipTest(t *testing.T, name string) string { } return value } + +// Changes into specified directory for the duration of the test. +// Returns the current working directory. 
+func Chdir(t *testing.T, dir string) string { + wd, err := os.Getwd() + require.NoError(t, err) + + abs, err := filepath.Abs(dir) + require.NoError(t, err) + + err = os.Chdir(abs) + require.NoError(t, err) + + t.Cleanup(func() { + err := os.Chdir(wd) + require.NoError(t, err) + }) + + return wd +} diff --git a/internal/testutil/file.go b/internal/testutil/file.go new file mode 100644 index 000000000..ba2c3280e --- /dev/null +++ b/internal/testutil/file.go @@ -0,0 +1,48 @@ +package testutil + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/require" +) + +func TouchNotebook(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + err = os.WriteFile(path, []byte("# Databricks notebook source"), 0644) + require.NoError(t, err) + return path +} + +func Touch(t *testing.T, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} + +func WriteFile(t *testing.T, content string, elems ...string) string { + path := filepath.Join(elems...) + err := os.MkdirAll(filepath.Dir(path), 0755) + require.NoError(t, err) + + f, err := os.Create(path) + require.NoError(t, err) + + _, err = f.WriteString(content) + require.NoError(t, err) + + err = f.Close() + require.NoError(t, err) + return path +} diff --git a/internal/unknown_command_test.go b/internal/unknown_command_test.go new file mode 100644 index 000000000..62b84027f --- /dev/null +++ b/internal/unknown_command_test.go @@ -0,0 +1,15 @@ +package internal + +import ( + "testing" + + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestUnknownCommand(t *testing.T) { + stdout, stderr, err := RequireErrorRun(t, "unknown-command") + + assert.Error(t, err, "unknown command", `unknown command "unknown-command" for "databricks"`) + assert.Equal(t, "", stdout.String()) + assert.Contains(t, stderr.String(), "unknown command") +} diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 164677390..bc354914f 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -34,15 +34,15 @@ func TestAccWorkspaceList(t *testing.T) { func TestWorkpaceListErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "workspace", "list") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } func TestWorkpaceGetStatusErrorWhenNoArguments(t *testing.T) { _, _, err := RequireErrorRun(t, "workspace", "get-status") - assert.Equal(t, "accepts 1 arg(s), received 0", err.Error()) + assert.Contains(t, err.Error(), "accepts 1 arg(s), received 0") } -func TestWorkpaceExportPrintsContents(t *testing.T) { +func TestAccWorkpaceExportPrintsContents(t *testing.T) { t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) ctx := context.Background() diff --git a/libs/auth/cache/cache.go b/libs/auth/cache/cache.go index 5511c1922..097353e74 100644 --- a/libs/auth/cache/cache.go +++ b/libs/auth/cache/cache.go @@ -1,106 +1,26 @@ package cache import ( - "encoding/json" - "errors" - "fmt" - "io/fs" - "os" - "path/filepath" + "context" "golang.org/x/oauth2" ) -const ( - // where the token cache is stored - tokenCacheFile = ".databricks/token-cache.json" - - // only the owner of the file has full execute, read, and write access - ownerExecReadWrite = 0o700 - - // only 
the owner of the file has full read and write access - ownerReadWrite = 0o600 - - // format versioning leaves some room for format improvement - tokenCacheVersion = 1 -) - -var ErrNotConfigured = errors.New("databricks OAuth is not configured for this host") - -// this implementation requires the calling code to do a machine-wide lock, -// otherwise the file might get corrupt. -type TokenCache struct { - Version int `json:"version"` - Tokens map[string]*oauth2.Token `json:"tokens"` - - fileLocation string +type TokenCache interface { + Store(key string, t *oauth2.Token) error + Lookup(key string) (*oauth2.Token, error) } -func (c *TokenCache) Store(key string, t *oauth2.Token) error { - err := c.load() - if errors.Is(err, fs.ErrNotExist) { - dir := filepath.Dir(c.fileLocation) - err = os.MkdirAll(dir, ownerExecReadWrite) - if err != nil { - return fmt.Errorf("mkdir: %w", err) - } - } else if err != nil { - return fmt.Errorf("load: %w", err) - } - c.Version = tokenCacheVersion - if c.Tokens == nil { - c.Tokens = map[string]*oauth2.Token{} - } - c.Tokens[key] = t - raw, err := json.MarshalIndent(c, "", " ") - if err != nil { - return fmt.Errorf("marshal: %w", err) - } - return os.WriteFile(c.fileLocation, raw, ownerReadWrite) +var tokenCache int + +func WithTokenCache(ctx context.Context, c TokenCache) context.Context { + return context.WithValue(ctx, &tokenCache, c) } -func (c *TokenCache) Lookup(key string) (*oauth2.Token, error) { - err := c.load() - if errors.Is(err, fs.ErrNotExist) { - return nil, ErrNotConfigured - } else if err != nil { - return nil, fmt.Errorf("load: %w", err) - } - t, ok := c.Tokens[key] +func GetTokenCache(ctx context.Context) TokenCache { + c, ok := ctx.Value(&tokenCache).(TokenCache) if !ok { - return nil, ErrNotConfigured + return &FileTokenCache{} } - return t, nil -} - -func (c *TokenCache) location() (string, error) { - home, err := os.UserHomeDir() - if err != nil { - return "", fmt.Errorf("home: %w", err) - } - return filepath.Join(home, tokenCacheFile), nil -} - -func (c *TokenCache) load() error { - loc, err := c.location() - if err != nil { - return err - } - c.fileLocation = loc - raw, err := os.ReadFile(loc) - if err != nil { - return fmt.Errorf("read: %w", err) - } - err = json.Unmarshal(raw, c) - if err != nil { - return fmt.Errorf("parse: %w", err) - } - if c.Version != tokenCacheVersion { - // in the later iterations we could do state upgraders, - // so that we transform token cache from v1 to v2 without - // losing the tokens and asking the user to re-authenticate. 
- return fmt.Errorf("needs version %d, got version %d", - tokenCacheVersion, c.Version) - } - return nil + return c } diff --git a/libs/auth/cache/file.go b/libs/auth/cache/file.go new file mode 100644 index 000000000..38dfea9f2 --- /dev/null +++ b/libs/auth/cache/file.go @@ -0,0 +1,108 @@ +package cache + +import ( + "encoding/json" + "errors" + "fmt" + "io/fs" + "os" + "path/filepath" + + "golang.org/x/oauth2" +) + +const ( + // where the token cache is stored + tokenCacheFile = ".databricks/token-cache.json" + + // only the owner of the file has full execute, read, and write access + ownerExecReadWrite = 0o700 + + // only the owner of the file has full read and write access + ownerReadWrite = 0o600 + + // format versioning leaves some room for format improvement + tokenCacheVersion = 1 +) + +var ErrNotConfigured = errors.New("databricks OAuth is not configured for this host") + +// this implementation requires the calling code to do a machine-wide lock, +// otherwise the file might get corrupt. +type FileTokenCache struct { + Version int `json:"version"` + Tokens map[string]*oauth2.Token `json:"tokens"` + + fileLocation string +} + +func (c *FileTokenCache) Store(key string, t *oauth2.Token) error { + err := c.load() + if errors.Is(err, fs.ErrNotExist) { + dir := filepath.Dir(c.fileLocation) + err = os.MkdirAll(dir, ownerExecReadWrite) + if err != nil { + return fmt.Errorf("mkdir: %w", err) + } + } else if err != nil { + return fmt.Errorf("load: %w", err) + } + c.Version = tokenCacheVersion + if c.Tokens == nil { + c.Tokens = map[string]*oauth2.Token{} + } + c.Tokens[key] = t + raw, err := json.MarshalIndent(c, "", " ") + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + return os.WriteFile(c.fileLocation, raw, ownerReadWrite) +} + +func (c *FileTokenCache) Lookup(key string) (*oauth2.Token, error) { + err := c.load() + if errors.Is(err, fs.ErrNotExist) { + return nil, ErrNotConfigured + } else if err != nil { + return nil, fmt.Errorf("load: %w", err) + } + t, ok := c.Tokens[key] + if !ok { + return nil, ErrNotConfigured + } + return t, nil +} + +func (c *FileTokenCache) location() (string, error) { + home, err := os.UserHomeDir() + if err != nil { + return "", fmt.Errorf("home: %w", err) + } + return filepath.Join(home, tokenCacheFile), nil +} + +func (c *FileTokenCache) load() error { + loc, err := c.location() + if err != nil { + return err + } + c.fileLocation = loc + raw, err := os.ReadFile(loc) + if err != nil { + return fmt.Errorf("read: %w", err) + } + err = json.Unmarshal(raw, c) + if err != nil { + return fmt.Errorf("parse: %w", err) + } + if c.Version != tokenCacheVersion { + // in the later iterations we could do state upgraders, + // so that we transform token cache from v1 to v2 without + // losing the tokens and asking the user to re-authenticate. 
+ return fmt.Errorf("needs version %d, got version %d", + tokenCacheVersion, c.Version) + } + return nil +} + +var _ TokenCache = (*FileTokenCache)(nil) diff --git a/libs/auth/cache/cache_test.go b/libs/auth/cache/file_test.go similarity index 93% rename from libs/auth/cache/cache_test.go rename to libs/auth/cache/file_test.go index 6529882c7..3e4aae36f 100644 --- a/libs/auth/cache/cache_test.go +++ b/libs/auth/cache/file_test.go @@ -27,7 +27,7 @@ func setup(t *testing.T) string { func TestStoreAndLookup(t *testing.T) { setup(t) - c := &TokenCache{} + c := &FileTokenCache{} err := c.Store("x", &oauth2.Token{ AccessToken: "abc", }) @@ -38,7 +38,7 @@ func TestStoreAndLookup(t *testing.T) { }) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} tok, err := l.Lookup("x") require.NoError(t, err) assert.Equal(t, "abc", tok.AccessToken) @@ -50,7 +50,7 @@ func TestStoreAndLookup(t *testing.T) { func TestNoCacheFileReturnsErrNotConfigured(t *testing.T) { setup(t) - l := &TokenCache{} + l := &FileTokenCache{} _, err := l.Lookup("x") assert.Equal(t, ErrNotConfigured, err) } @@ -63,7 +63,7 @@ func TestLoadCorruptFile(t *testing.T) { err = os.WriteFile(f, []byte("abc"), ownerExecReadWrite) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} _, err = l.Lookup("x") assert.EqualError(t, err, "load: parse: invalid character 'a' looking for beginning of value") } @@ -76,14 +76,14 @@ func TestLoadWrongVersion(t *testing.T) { err = os.WriteFile(f, []byte(`{"version": 823, "things": []}`), ownerExecReadWrite) require.NoError(t, err) - l := &TokenCache{} + l := &FileTokenCache{} _, err = l.Lookup("x") assert.EqualError(t, err, "load: needs version 1, got version 823") } func TestDevNull(t *testing.T) { t.Setenv(homeEnvVar, "/dev/null") - l := &TokenCache{} + l := &FileTokenCache{} _, err := l.Lookup("x") // macOS/Linux: load: read: open /dev/null/.databricks/token-cache.json: // windows: databricks OAuth is not configured for this host @@ -95,7 +95,7 @@ func TestStoreOnDev(t *testing.T) { t.SkipNow() } t.Setenv(homeEnvVar, "/dev") - c := &TokenCache{} + c := &FileTokenCache{} err := c.Store("x", &oauth2.Token{ AccessToken: "abc", }) diff --git a/libs/auth/cache/in_memory.go b/libs/auth/cache/in_memory.go new file mode 100644 index 000000000..469d45575 --- /dev/null +++ b/libs/auth/cache/in_memory.go @@ -0,0 +1,26 @@ +package cache + +import ( + "golang.org/x/oauth2" +) + +type InMemoryTokenCache struct { + Tokens map[string]*oauth2.Token +} + +// Lookup implements TokenCache. +func (i *InMemoryTokenCache) Lookup(key string) (*oauth2.Token, error) { + token, ok := i.Tokens[key] + if !ok { + return nil, ErrNotConfigured + } + return token, nil +} + +// Store implements TokenCache. 
+func (i *InMemoryTokenCache) Store(key string, t *oauth2.Token) error { + i.Tokens[key] = t + return nil +} + +var _ TokenCache = (*InMemoryTokenCache)(nil) diff --git a/libs/auth/cache/in_memory_test.go b/libs/auth/cache/in_memory_test.go new file mode 100644 index 000000000..d8394d3b2 --- /dev/null +++ b/libs/auth/cache/in_memory_test.go @@ -0,0 +1,44 @@ +package cache + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "golang.org/x/oauth2" +) + +func TestInMemoryCacheHit(t *testing.T) { + token := &oauth2.Token{ + AccessToken: "abc", + } + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{ + "key": token, + }, + } + res, err := c.Lookup("key") + assert.Equal(t, res, token) + assert.NoError(t, err) +} + +func TestInMemoryCacheMiss(t *testing.T) { + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{}, + } + _, err := c.Lookup("key") + assert.ErrorIs(t, err, ErrNotConfigured) +} + +func TestInMemoryCacheStore(t *testing.T) { + token := &oauth2.Token{ + AccessToken: "abc", + } + c := &InMemoryTokenCache{ + Tokens: map[string]*oauth2.Token{}, + } + err := c.Store("key", token) + assert.NoError(t, err) + res, err := c.Lookup("key") + assert.Equal(t, res, token) + assert.NoError(t, err) +} diff --git a/libs/auth/oauth.go b/libs/auth/oauth.go index dd27d04b2..1f3e032de 100644 --- a/libs/auth/oauth.go +++ b/libs/auth/oauth.go @@ -6,22 +6,34 @@ import ( "crypto/sha256" _ "embed" "encoding/base64" - "encoding/json" "errors" "fmt" - "io" "net" - "net/http" "strings" "time" "github.com/databricks/cli/libs/auth/cache" + "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/retries" "github.com/pkg/browser" "golang.org/x/oauth2" "golang.org/x/oauth2/authhandler" ) +var apiClientForOauth int + +func WithApiClientForOAuth(ctx context.Context, c *httpclient.ApiClient) context.Context { + return context.WithValue(ctx, &apiClientForOauth, c) +} + +func GetApiClientForOAuth(ctx context.Context) *httpclient.ApiClient { + c, ok := ctx.Value(&apiClientForOauth).(*httpclient.ApiClient) + if !ok { + return httpclient.NewApiClient(httpclient.ClientConfig{}) + } + return c +} + const ( // these values are predefined by Databricks as a public client // and is specific to this application only. 
Using these values @@ -30,7 +42,7 @@ const ( appRedirectAddr = "localhost:8020" // maximum amount of time to acquire listener on appRedirectAddr - DefaultTimeout = 45 * time.Second + listenerTimeout = 45 * time.Second ) var ( // Databricks SDK API: `databricks OAuth is not` will be checked for presence @@ -43,19 +55,14 @@ type PersistentAuth struct { Host string AccountID string - http httpGet - cache tokenCache + http *httpclient.ApiClient + cache cache.TokenCache ln net.Listener browser func(string) error } -type httpGet interface { - Get(string) (*http.Response, error) -} - -type tokenCache interface { - Store(key string, t *oauth2.Token) error - Lookup(key string) (*oauth2.Token, error) +func (a *PersistentAuth) SetApiClient(h *httpclient.ApiClient) { + a.http = h } func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { @@ -77,10 +84,12 @@ func (a *PersistentAuth) Load(ctx context.Context) (*oauth2.Token, error) { } // OAuth2 config is invoked only for expired tokens to speed up // the happy path in the token retrieval - cfg, err := a.oauth2Config() + cfg, err := a.oauth2Config(ctx) if err != nil { return nil, err } + // make OAuth2 library use our client + ctx = a.http.InContextForOAuth2(ctx) // eagerly refresh token refreshed, err := cfg.TokenSource(ctx, t).Token() if err != nil { @@ -110,7 +119,7 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { if err != nil { return fmt.Errorf("init: %w", err) } - cfg, err := a.oauth2Config() + cfg, err := a.oauth2Config(ctx) if err != nil { return err } @@ -120,6 +129,8 @@ func (a *PersistentAuth) Challenge(ctx context.Context) error { } defer cb.Close() state, pkce := a.stateAndPKCE() + // make OAuth2 library use our client + ctx = a.http.InContextForOAuth2(ctx) ts := authhandler.TokenSourceWithPKCE(ctx, cfg, state, cb.Handler, pkce) t, err := ts.Token() if err != nil { @@ -138,10 +149,10 @@ func (a *PersistentAuth) init(ctx context.Context) error { return ErrFetchCredentials } if a.http == nil { - a.http = http.DefaultClient + a.http = GetApiClientForOAuth(ctx) } if a.cache == nil { - a.cache = &cache.TokenCache{} + a.cache = cache.GetTokenCache(ctx) } if a.browser == nil { a.browser = browser.OpenURL @@ -149,7 +160,7 @@ func (a *PersistentAuth) init(ctx context.Context) error { // try acquire listener, which we also use as a machine-local // exclusive lock to prevent token cache corruption in the scope // of developer machine, where this command runs. 
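As a side note, a minimal sketch of how a caller (for example a test) could swap in the in-memory cache through the context helpers above, so that token lookups never touch ~/.databricks/token-cache.json. The host key is illustrative; WithTokenCache, GetTokenCache and InMemoryTokenCache are the ones introduced in libs/auth/cache.

```go
package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/libs/auth/cache"
	"golang.org/x/oauth2"
)

func main() {
	// Seed an in-memory cache. Any code that resolves the cache via
	// cache.GetTokenCache(ctx), such as PersistentAuth.init above, will use it.
	c := &cache.InMemoryTokenCache{
		Tokens: map[string]*oauth2.Token{
			"https://example.cloud.databricks.com": {AccessToken: "abc"},
		},
	}
	ctx := cache.WithTokenCache(context.Background(), c)

	tok, err := cache.GetTokenCache(ctx).Lookup("https://example.cloud.databricks.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.AccessToken) // prints: abc
}
```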
- listener, err := retries.Poll(ctx, DefaultTimeout, + listener, err := retries.Poll(ctx, listenerTimeout, func() (*net.Listener, *retries.Err) { var lc net.ListenConfig l, err := lc.Listen(ctx, "tcp", appRedirectAddr) @@ -172,7 +183,7 @@ func (a *PersistentAuth) Close() error { return a.ln.Close() } -func (a *PersistentAuth) oidcEndpoints() (*oauthAuthorizationServer, error) { +func (a *PersistentAuth) oidcEndpoints(ctx context.Context) (*oauthAuthorizationServer, error) { prefix := a.key() if a.AccountID != "" { return &oauthAuthorizationServer{ @@ -180,31 +191,20 @@ func (a *PersistentAuth) oidcEndpoints() (*oauthAuthorizationServer, error) { TokenEndpoint: fmt.Sprintf("%s/v1/token", prefix), }, nil } + var oauthEndpoints oauthAuthorizationServer oidc := fmt.Sprintf("%s/oidc/.well-known/oauth-authorization-server", prefix) - oidcResponse, err := a.http.Get(oidc) + err := a.http.Do(ctx, "GET", oidc, httpclient.WithResponseUnmarshal(&oauthEndpoints)) if err != nil { return nil, fmt.Errorf("fetch .well-known: %w", err) } - if oidcResponse.StatusCode != 200 { + var httpErr *httpclient.HttpError + if errors.As(err, &httpErr) && httpErr.StatusCode != 200 { return nil, ErrOAuthNotSupported } - if oidcResponse.Body == nil { - return nil, fmt.Errorf("fetch .well-known: empty body") - } - defer oidcResponse.Body.Close() - raw, err := io.ReadAll(oidcResponse.Body) - if err != nil { - return nil, fmt.Errorf("read .well-known: %w", err) - } - var oauthEndpoints oauthAuthorizationServer - err = json.Unmarshal(raw, &oauthEndpoints) - if err != nil { - return nil, fmt.Errorf("parse .well-known: %w", err) - } return &oauthEndpoints, nil } -func (a *PersistentAuth) oauth2Config() (*oauth2.Config, error) { +func (a *PersistentAuth) oauth2Config(ctx context.Context) (*oauth2.Config, error) { // in this iteration of CLI, we're using all scopes by default, // because tools like CLI and Terraform do use all apis. 
This // decision may be reconsidered later, once we have a proper @@ -213,7 +213,7 @@ func (a *PersistentAuth) oauth2Config() (*oauth2.Config, error) { "offline_access", "all-apis", } - endpoints, err := a.oidcEndpoints() + endpoints, err := a.oidcEndpoints(ctx) if err != nil { return nil, fmt.Errorf("oidc: %w", err) } diff --git a/libs/auth/oauth_test.go b/libs/auth/oauth_test.go index 9b5aa9ac9..ea6a8061e 100644 --- a/libs/auth/oauth_test.go +++ b/libs/auth/oauth_test.go @@ -5,14 +5,14 @@ import ( "crypto/tls" _ "embed" "fmt" - "io" "net/http" "net/url" - "strings" "testing" "time" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/databricks/databricks-sdk-go/qa" "github.com/stretchr/testify/assert" "golang.org/x/oauth2" @@ -24,34 +24,29 @@ func TestOidcEndpointsForAccounts(t *testing.T) { AccountID: "xyz", } defer p.Close() - s, err := p.oidcEndpoints() + s, err := p.oidcEndpoints(context.Background()) assert.NoError(t, err) assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/authorize", s.AuthorizationEndpoint) assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/token", s.TokenEndpoint) } -type mockGet func(url string) (*http.Response, error) - -func (m mockGet) Get(url string) (*http.Response, error) { - return m(url) -} - func TestOidcForWorkspace(t *testing.T) { p := &PersistentAuth{ Host: "abc", - http: mockGet(func(url string) (*http.Response, error) { - assert.Equal(t, "https://abc/oidc/.well-known/oauth-authorization-server", url) - return &http.Response{ - StatusCode: 200, - Body: io.NopCloser(strings.NewReader(`{ - "authorization_endpoint": "a", - "token_endpoint": "b" - }`)), - }, nil + http: httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: fixtures.MappingTransport{ + "GET /oidc/.well-known/oauth-authorization-server": { + Status: 200, + Response: map[string]string{ + "authorization_endpoint": "a", + "token_endpoint": "b", + }, + }, + }, }), } defer p.Close() - endpoints, err := p.oidcEndpoints() + endpoints, err := p.oidcEndpoints(context.Background()) assert.NoError(t, err) assert.Equal(t, "a", endpoints.AuthorizationEndpoint) assert.Equal(t, "b", endpoints.TokenEndpoint) diff --git a/libs/auth/service_principal.go b/libs/auth/service_principal.go index cb488d16e..5f1854e3a 100644 --- a/libs/auth/service_principal.go +++ b/libs/auth/service_principal.go @@ -4,12 +4,12 @@ import ( "github.com/google/uuid" ) -// Determines whether a given user id is a service principal. -// This function uses a heuristic: if the user id is a UUID, then we assume +// Determines whether a given user name is a service principal. +// This function uses a heuristic: if the user name is a UUID, then we assume // it's a service principal. Unfortunately, the service principal listing API is too // slow for our purposes. And the "users" and "service principals get" APIs // only allow access by workspace admins. 
-func IsServicePrincipal(userId string) bool { - _, err := uuid.Parse(userId) +func IsServicePrincipal(userName string) bool { + _, err := uuid.Parse(userName) return err == nil } diff --git a/libs/cmdgroup/command.go b/libs/cmdgroup/command.go new file mode 100644 index 000000000..a2a776935 --- /dev/null +++ b/libs/cmdgroup/command.go @@ -0,0 +1,108 @@ +package cmdgroup + +import ( + "io" + "strings" + "text/template" + "unicode" + + "github.com/spf13/cobra" + "github.com/spf13/pflag" +) + +type CommandWithGroupFlag struct { + cmd *cobra.Command + flagGroups []*FlagGroup +} + +func (c *CommandWithGroupFlag) Command() *cobra.Command { + return c.cmd +} + +func (c *CommandWithGroupFlag) FlagGroups() []*FlagGroup { + return c.flagGroups +} + +func (c *CommandWithGroupFlag) NonGroupedFlags() *pflag.FlagSet { + nonGrouped := pflag.NewFlagSet("non-grouped", pflag.ContinueOnError) + c.cmd.LocalFlags().VisitAll(func(f *pflag.Flag) { + for _, fg := range c.flagGroups { + if fg.Has(f) { + return + } + } + nonGrouped.AddFlag(f) + }) + + return nonGrouped +} + +func (c *CommandWithGroupFlag) HasNonGroupedFlags() bool { + return c.NonGroupedFlags().HasFlags() +} + +func NewCommandWithGroupFlag(cmd *cobra.Command) *CommandWithGroupFlag { + cmdWithFlagGroups := &CommandWithGroupFlag{cmd: cmd, flagGroups: make([]*FlagGroup, 0)} + cmd.SetUsageFunc(func(c *cobra.Command) error { + err := tmpl(c.OutOrStderr(), c.UsageTemplate(), cmdWithFlagGroups) + if err != nil { + c.PrintErrln(err) + } + return nil + }) + cmd.SetUsageTemplate(usageTemplate) + return cmdWithFlagGroups +} + +func (c *CommandWithGroupFlag) AddFlagGroup(fg *FlagGroup) { + c.flagGroups = append(c.flagGroups, fg) + c.cmd.Flags().AddFlagSet(fg.FlagSet()) +} + +type FlagGroup struct { + name string + description string + flagSet *pflag.FlagSet +} + +func NewFlagGroup(name string) *FlagGroup { + return &FlagGroup{name: name, flagSet: pflag.NewFlagSet(name, pflag.ContinueOnError)} +} + +func (c *FlagGroup) Name() string { + return c.name +} + +func (c *FlagGroup) Description() string { + return c.description +} + +func (c *FlagGroup) SetDescription(description string) { + c.description = description +} + +func (c *FlagGroup) FlagSet() *pflag.FlagSet { + return c.flagSet +} + +func (c *FlagGroup) Has(f *pflag.Flag) bool { + return c.flagSet.Lookup(f.Name) != nil +} + +var templateFuncs = template.FuncMap{ + "trim": strings.TrimSpace, + "trimRightSpace": trimRightSpace, + "trimTrailingWhitespaces": trimRightSpace, +} + +func trimRightSpace(s string) string { + return strings.TrimRightFunc(s, unicode.IsSpace) +} + +// tmpl executes the given template text on data, writing the result to w. 
+func tmpl(w io.Writer, text string, data interface{}) error { + t := template.New("top") + t.Funcs(templateFuncs) + template.Must(t.Parse(text)) + return t.Execute(w, data) +} diff --git a/libs/cmdgroup/command_test.go b/libs/cmdgroup/command_test.go new file mode 100644 index 000000000..f3e3fe6ab --- /dev/null +++ b/libs/cmdgroup/command_test.go @@ -0,0 +1,72 @@ +package cmdgroup + +import ( + "bytes" + "testing" + + "github.com/spf13/cobra" + "github.com/stretchr/testify/require" +) + +func TestCommandFlagGrouping(t *testing.T) { + cmd := &cobra.Command{ + Use: "test [flags]", + Short: "test command", + Run: func(cmd *cobra.Command, args []string) { + // Do nothing + }, + } + + parent := &cobra.Command{ + Use: "parent", + } + + parent.PersistentFlags().String("global", "", "Global flag") + parent.AddCommand(cmd) + + wrappedCmd := NewCommandWithGroupFlag(cmd) + jobGroup := NewFlagGroup("Job") + jobGroup.SetDescription("Description.") + fs := jobGroup.FlagSet() + fs.String("job-name", "", "Name of the job") + fs.String("job-type", "", "Type of the job") + wrappedCmd.AddFlagGroup(jobGroup) + + pipelineGroup := NewFlagGroup("Pipeline") + fs = pipelineGroup.FlagSet() + fs.String("pipeline-name", "", "Name of the pipeline") + fs.String("pipeline-type", "", "Type of the pipeline") + wrappedCmd.AddFlagGroup(pipelineGroup) + + cmd.Flags().BoolP("bool", "b", false, "Bool flag") + + buf := bytes.NewBuffer(nil) + cmd.SetOutput(buf) + cmd.Usage() + + expected := `Usage: + parent test [flags] + +Job Flags: + Description. + --job-name string Name of the job + --job-type string Type of the job + +Pipeline Flags: + --pipeline-name string Name of the pipeline + --pipeline-type string Type of the pipeline + +Flags: + -b, --bool Bool flag + +Global Flags: + --global string Global flag +` + require.Equal(t, expected, buf.String()) + + require.NotNil(t, cmd.Flags().Lookup("job-name")) + require.NotNil(t, cmd.Flags().Lookup("job-type")) + require.NotNil(t, cmd.Flags().Lookup("pipeline-name")) + require.NotNil(t, cmd.Flags().Lookup("pipeline-type")) + require.NotNil(t, cmd.Flags().Lookup("bool")) +} diff --git a/libs/cmdgroup/template.go b/libs/cmdgroup/template.go new file mode 100644 index 000000000..d2062c558 --- /dev/null +++ b/libs/cmdgroup/template.go @@ -0,0 +1,15 @@ +package cmdgroup + +const usageTemplate = `Usage:{{if .Command.Runnable}} + {{.Command.UseLine}}{{end}} +{{range .FlagGroups}} +{{.Name}} Flags:{{if not (eq .Description "")}} + {{.Description}}{{end}} +{{.FlagSet.FlagUsages | trimTrailingWhitespaces}} +{{end}} +{{if .HasNonGroupedFlags}}Flags: +{{.NonGroupedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}}{{if .Command.HasAvailableInheritedFlags}} + +Global Flags: +{{.Command.InheritedFlags.FlagUsages | trimTrailingWhitespaces}}{{end}} +` diff --git a/libs/cmdio/io.go b/libs/cmdio/io.go index d20991a7c..75c0c4b87 100644 --- a/libs/cmdio/io.go +++ b/libs/cmdio/io.go @@ -22,27 +22,29 @@ import ( type cmdIO struct { // states if we are in the interactive mode // e.g. 
if stdout is a terminal - interactive bool - outputFormat flags.Output - template string - in io.Reader - out io.Writer - err io.Writer + interactive bool + outputFormat flags.Output + headerTemplate string + template string + in io.Reader + out io.Writer + err io.Writer } -func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, template string) *cmdIO { +func NewIO(outputFormat flags.Output, in io.Reader, out io.Writer, err io.Writer, headerTemplate, template string) *cmdIO { // The check below is similar to color.NoColor but uses the specified err writer. dumb := os.Getenv("NO_COLOR") != "" || os.Getenv("TERM") == "dumb" if f, ok := err.(*os.File); ok && !dumb { dumb = !isatty.IsTerminal(f.Fd()) && !isatty.IsCygwinTerminal(f.Fd()) } return &cmdIO{ - interactive: !dumb, - outputFormat: outputFormat, - template: template, - in: in, - out: out, - err: err, + interactive: !dumb, + outputFormat: outputFormat, + headerTemplate: headerTemplate, + template: template, + in: in, + out: out, + err: err, } } @@ -113,48 +115,6 @@ func IsGitBash(ctx context.Context) bool { return false } -func Render(ctx context.Context, v any) error { - c := fromContext(ctx) - return RenderWithTemplate(ctx, v, c.template) -} - -func RenderWithTemplate(ctx context.Context, v any, template string) error { - // TODO: add terminal width & white/dark theme detection - c := fromContext(ctx) - switch c.outputFormat { - case flags.OutputJSON: - return renderJson(c.out, v) - case flags.OutputText: - if template != "" { - return renderTemplate(c.out, template, v) - } - return renderJson(c.out, v) - default: - return fmt.Errorf("invalid output format: %s", c.outputFormat) - } -} - -func RenderJson(ctx context.Context, v any) error { - c := fromContext(ctx) - if c.outputFormat == flags.OutputJSON { - return renderJson(c.out, v) - } - return nil -} - -func RenderReader(ctx context.Context, r io.Reader) error { - c := fromContext(ctx) - switch c.outputFormat { - case flags.OutputJSON: - return fmt.Errorf("json output not supported") - case flags.OutputText: - _, err := io.Copy(c.out, r) - return err - default: - return fmt.Errorf("invalid output format: %s", c.outputFormat) - } -} - type Tuple struct{ Name, Id string } func (c *cmdIO) Select(items []Tuple, label string) (id string, err error) { diff --git a/libs/cmdio/render.go b/libs/cmdio/render.go index d641f61df..ec851b8ff 100644 --- a/libs/cmdio/render.go +++ b/libs/cmdio/render.go @@ -2,14 +2,19 @@ package cmdio import ( "bytes" + "context" "encoding/base64" "encoding/json" + "errors" + "fmt" "io" "strings" "text/tabwriter" "text/template" "time" + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/listing" "github.com/fatih/color" "github.com/nwidger/jsoncolor" ) @@ -46,8 +51,123 @@ func Heredoc(tmpl string) (trimmed string) { return strings.TrimSpace(trimmed) } -func renderJson(w io.Writer, v any) error { - pretty, err := fancyJSON(v) +// writeFlusher represents a buffered writer that can be flushed. This is useful when +// buffering writing a large number of resources (such as during a list API). +type writeFlusher interface { + io.Writer + Flush() error +} + +type jsonRenderer interface { + // Render an object as JSON to the provided writeFlusher. + renderJson(context.Context, writeFlusher) error +} + +type textRenderer interface { + // Render an object as text to the provided writeFlusher. 
+ renderText(context.Context, io.Writer) error +} + +type templateRenderer interface { + // Render an object using the provided template and write to the provided tabwriter.Writer. + renderTemplate(context.Context, *template.Template, *tabwriter.Writer) error +} + +type readerRenderer struct { + reader io.Reader +} + +func (r readerRenderer) renderText(_ context.Context, w io.Writer) error { + _, err := io.Copy(w, r.reader) + return err +} + +type iteratorRenderer[T any] struct { + t listing.Iterator[T] + bufferSize int +} + +func (ir iteratorRenderer[T]) getBufferSize() int { + if ir.bufferSize == 0 { + return 20 + } + return ir.bufferSize +} + +func (ir iteratorRenderer[T]) renderJson(ctx context.Context, w writeFlusher) error { + // Iterators are always rendered as a list of resources in JSON. + _, err := w.Write([]byte("[\n ")) + if err != nil { + return err + } + for i := 0; ir.t.HasNext(ctx); i++ { + if i != 0 { + _, err = w.Write([]byte(",\n ")) + if err != nil { + return err + } + } + n, err := ir.t.Next(ctx) + if err != nil { + return err + } + res, err := json.MarshalIndent(n, " ", " ") + if err != nil { + return err + } + _, err = w.Write(res) + if err != nil { + return err + } + if (i+1)%ir.getBufferSize() == 0 { + err = w.Flush() + if err != nil { + return err + } + } + } + _, err = w.Write([]byte("\n]\n")) + if err != nil { + return err + } + return w.Flush() +} + +func (ir iteratorRenderer[T]) renderTemplate(ctx context.Context, t *template.Template, w *tabwriter.Writer) error { + buf := make([]any, 0, ir.getBufferSize()) + for i := 0; ir.t.HasNext(ctx); i++ { + n, err := ir.t.Next(ctx) + if err != nil { + return err + } + buf = append(buf, n) + if len(buf) == cap(buf) { + err = t.Execute(w, buf) + if err != nil { + return err + } + err = w.Flush() + if err != nil { + return err + } + buf = buf[:0] + } + } + if len(buf) > 0 { + err := t.Execute(w, buf) + if err != nil { + return err + } + } + return w.Flush() +} + +type defaultRenderer struct { + t any +} + +func (d defaultRenderer) renderJson(_ context.Context, w writeFlusher) error { + pretty, err := fancyJSON(d.t) if err != nil { return err } @@ -56,12 +176,126 @@ func renderJson(w io.Writer, v any) error { return err } _, err = w.Write([]byte("\n")) - return err + if err != nil { + return err + } + return w.Flush() } -func renderTemplate(w io.Writer, tmpl string, v any) error { +func (d defaultRenderer) renderTemplate(_ context.Context, t *template.Template, w *tabwriter.Writer) error { + return t.Execute(w, d.t) +} + +// Returns something implementing one of the following interfaces: +// - jsonRenderer +// - textRenderer +// - templateRenderer +func newRenderer(t any) any { + if r, ok := t.(io.Reader); ok { + return readerRenderer{reader: r} + } + return defaultRenderer{t: t} +} + +func newIteratorRenderer[T any](i listing.Iterator[T]) iteratorRenderer[T] { + return iteratorRenderer[T]{t: i} +} + +type bufferedFlusher struct { + w io.Writer + b *bytes.Buffer +} + +func (b bufferedFlusher) Write(bs []byte) (int, error) { + return b.b.Write(bs) +} + +func (b bufferedFlusher) Flush() error { + _, err := b.w.Write(b.b.Bytes()) + if err != nil { + return err + } + b.b.Reset() + return nil +} + +func newBufferedFlusher(w io.Writer) writeFlusher { + return bufferedFlusher{ + w: w, + b: &bytes.Buffer{}, + } +} + +func renderWithTemplate(r any, ctx context.Context, outputFormat flags.Output, w io.Writer, headerTemplate, template string) error { + // TODO: add terminal width & white/dark theme detection + switch outputFormat { + 
case flags.OutputJSON: + if jr, ok := r.(jsonRenderer); ok { + return jr.renderJson(ctx, newBufferedFlusher(w)) + } + return errors.New("json output not supported") + case flags.OutputText: + if tr, ok := r.(templateRenderer); ok && template != "" { + return renderUsingTemplate(ctx, tr, w, headerTemplate, template) + } + if tr, ok := r.(textRenderer); ok { + return tr.renderText(ctx, w) + } + if jr, ok := r.(jsonRenderer); ok { + return jr.renderJson(ctx, newBufferedFlusher(w)) + } + return errors.New("no renderer defined") + default: + return fmt.Errorf("invalid output format: %s", outputFormat) + } +} + +type listingInterface interface { + HasNext(context.Context) bool +} + +func Render(ctx context.Context, v any) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIterator instead") + } + return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func RenderIterator[T any](ctx context.Context, i listing.Iterator[T]) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func RenderWithTemplate(ctx context.Context, v any, headerTemplate, template string) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIteratorWithTemplate instead") + } + return renderWithTemplate(newRenderer(v), ctx, c.outputFormat, c.out, headerTemplate, template) +} + +func RenderIteratorWithTemplate[T any](ctx context.Context, i listing.Iterator[T], headerTemplate, template string) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, headerTemplate, template) +} + +func RenderJson(ctx context.Context, v any) error { + c := fromContext(ctx) + if _, ok := v.(listingInterface); ok { + panic("use RenderIteratorJson instead") + } + return renderWithTemplate(newRenderer(v), ctx, flags.OutputJSON, c.out, c.headerTemplate, c.template) +} + +func RenderIteratorJson[T any](ctx context.Context, i listing.Iterator[T]) error { + c := fromContext(ctx) + return renderWithTemplate(newIteratorRenderer(i), ctx, c.outputFormat, c.out, c.headerTemplate, c.template) +} + +func renderUsingTemplate(ctx context.Context, r templateRenderer, w io.Writer, headerTmpl, tmpl string) error { tw := tabwriter.NewWriter(w, 0, 4, 2, ' ', 0) - t, err := template.New("command").Funcs(template.FuncMap{ + base := template.New("command").Funcs(template.FuncMap{ // we render colored output if stdout is TTY, otherwise we render text. // in the future we'll check if we can explicitly check for stderr being // a TTY @@ -72,6 +306,12 @@ func renderTemplate(w io.Writer, tmpl string, v any) error { "yellow": color.YellowString, "magenta": color.MagentaString, "cyan": color.CyanString, + "bold": func(format string, a ...interface{}) string { + return color.New(color.Bold).Sprintf(format, a...) + }, + "italic": func(format string, a ...interface{}) string { + return color.New(color.Italic).Sprintf(format, a...) + }, "replace": strings.ReplaceAll, "join": strings.Join, "bool": func(v bool) string { @@ -116,11 +356,24 @@ func renderTemplate(w io.Writer, tmpl string, v any) error { } return string(out), nil }, - }).Parse(tmpl) + }) + if headerTmpl != "" { + headerT, err := base.Parse(headerTmpl) + if err != nil { + return err + } + err = headerT.Execute(tw, nil) + if err != nil { + return err + } + tw.Write([]byte("\n")) + // Do not flush here. 
Instead, allow the first 100 resources to determine the initial spacing of the header columns. + } + t, err := base.Parse(tmpl) if err != nil { return err } - err = t.Execute(tw, v) + err = r.renderTemplate(ctx, t, tw) if err != nil { return err } diff --git a/libs/cmdio/render_test.go b/libs/cmdio/render_test.go new file mode 100644 index 000000000..6bde446c4 --- /dev/null +++ b/libs/cmdio/render_test.go @@ -0,0 +1,190 @@ +package cmdio + +import ( + "bytes" + "context" + "encoding/json" + "errors" + "fmt" + "strings" + "testing" + + "github.com/databricks/cli/libs/flags" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/provisioning" + "github.com/stretchr/testify/assert" +) + +type testCase struct { + name string + v any + outputFormat flags.Output + headerTemplate string + template string + expected string + errMessage string +} + +var dummyWorkspace1 = provisioning.Workspace{ + WorkspaceId: 123, + WorkspaceName: "abc", +} + +var dummyWorkspace2 = provisioning.Workspace{ + WorkspaceId: 456, + WorkspaceName: "def", +} + +type dummyIterator struct { + items []*provisioning.Workspace +} + +func (d *dummyIterator) HasNext(_ context.Context) bool { + return len(d.items) > 0 +} + +func (d *dummyIterator) Next(ctx context.Context) (*provisioning.Workspace, error) { + if !d.HasNext(ctx) { + return nil, errors.New("no more items") + } + item := d.items[0] + d.items = d.items[1:] + return item, nil +} + +func makeWorkspaces(count int) []*provisioning.Workspace { + res := make([]*provisioning.Workspace, 0, count) + next := []*provisioning.Workspace{&dummyWorkspace1, &dummyWorkspace2} + for i := 0; i < count; i++ { + n := next[0] + next = append(next[1:], n) + res = append(res, n) + } + return res +} + +func makeIterator(count int) listing.Iterator[*provisioning.Workspace] { + items := make([]*provisioning.Workspace, 0, count) + items = append(items, makeWorkspaces(count)...) 
+ return &dummyIterator{ + items: items, + } +} + +func makeBigOutput(count int) string { + res := bytes.Buffer{} + for _, ws := range makeWorkspaces(count) { + res.Write([]byte(fmt.Sprintf("%d %s\n", ws.WorkspaceId, ws.WorkspaceName))) + } + return res.String() +} + +func must[T any](a T, e error) T { + if e != nil { + panic(e) + } + return a +} + +var testCases = []testCase{ + { + name: "Workspace with header and template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{.WorkspaceId}}\t{{.WorkspaceName}}", + expected: `id name +123 abc`, + }, + { + name: "Workspace with no header and template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + template: "{{.WorkspaceId}}\t{{.WorkspaceName}}", + expected: `123 abc`, + }, + { + name: "Workspace with no header and no template", + v: dummyWorkspace1, + outputFormat: flags.OutputText, + expected: `{ + "workspace_id":123, + "workspace_name":"abc" +} +`, + }, + { + name: "Workspace Iterator with header and template", + v: makeIterator(2), + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: `id name +123 abc +456 def +`, + }, + { + name: "Workspace Iterator with no header and template", + v: makeIterator(2), + outputFormat: flags.OutputText, + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: `123 abc +456 def +`, + }, + { + name: "Workspace Iterator with no header and no template", + v: makeIterator(2), + outputFormat: flags.OutputText, + expected: string(must(json.MarshalIndent(makeWorkspaces(2), "", " "))) + "\n", + }, + { + name: "Big Workspace Iterator with template", + v: makeIterator(234), + outputFormat: flags.OutputText, + headerTemplate: "id\tname", + template: "{{range .}}{{.WorkspaceId}}\t{{.WorkspaceName}}\n{{end}}", + expected: "id name\n" + makeBigOutput(234), + }, + { + name: "Big Workspace Iterator with no template", + v: makeIterator(234), + outputFormat: flags.OutputText, + expected: string(must(json.MarshalIndent(makeWorkspaces(234), "", " "))) + "\n", + }, + { + name: "io.Reader", + v: strings.NewReader("a test"), + outputFormat: flags.OutputText, + expected: "a test", + }, + { + name: "io.Reader", + v: strings.NewReader("a test"), + outputFormat: flags.OutputJSON, + errMessage: "json output not supported", + }, +} + +func TestRender(t *testing.T) { + for _, c := range testCases { + t.Run(c.name, func(t *testing.T) { + output := &bytes.Buffer{} + cmdIO := NewIO(c.outputFormat, nil, output, output, c.headerTemplate, c.template) + ctx := InContext(context.Background(), cmdIO) + var err error + if vv, ok := c.v.(listing.Iterator[*provisioning.Workspace]); ok { + err = RenderIterator(ctx, vv) + } else { + err = Render(ctx, c.v) + } + if c.errMessage != "" { + assert.ErrorContains(t, err, c.errMessage) + } else { + assert.NoError(t, err) + assert.Equal(t, c.expected, output.String()) + } + }) + } +} diff --git a/libs/databrickscfg/cfgpickers/clusters_test.go b/libs/databrickscfg/cfgpickers/clusters_test.go index 8afcd6d07..2e62f93a8 100644 --- a/libs/databrickscfg/cfgpickers/clusters_test.go +++ b/libs/databrickscfg/cfgpickers/clusters_test.go @@ -115,7 +115,7 @@ func TestFirstCompatibleCluster(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + ctx = 
cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) clusterID, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.NoError(t, err) require.Equal(t, "bcd-id", clusterID) @@ -162,7 +162,7 @@ func TestNoCompatibleClusters(t *testing.T) { w := databricks.Must(databricks.NewWorkspaceClient((*databricks.Config)(cfg))) ctx := context.Background() - ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "...")) + ctx = cmdio.InContext(ctx, cmdio.NewIO(flags.OutputText, &bytes.Buffer{}, &bytes.Buffer{}, &bytes.Buffer{}, "", "...")) _, err := AskForCluster(ctx, w, WithDatabricksConnect("13.1")) require.Equal(t, ErrNoCompatibleClusters, err) } diff --git a/libs/databrickscfg/loader.go b/libs/databrickscfg/loader.go index 1dc2a9452..12a516c59 100644 --- a/libs/databrickscfg/loader.go +++ b/libs/databrickscfg/loader.go @@ -4,7 +4,7 @@ import ( "context" "errors" "fmt" - "os" + "io/fs" "strings" "github.com/databricks/cli/libs/log" @@ -68,7 +68,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { ctx := context.Background() configFile, err := config.LoadFile(cfg.ConfigFile) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return fmt.Errorf("cannot parse config file: %w", err) @@ -98,7 +98,10 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { } log.Debugf(ctx, "Loading profile %s because of host match", match.Name()) - err = config.ConfigAttributes.ResolveFromStringMap(cfg, match.KeysHash()) + err = config.ConfigAttributes.ResolveFromStringMapWithSource(cfg, match.KeysHash(), config.Source{ + Type: config.SourceFile, + Name: configFile.Path(), + }) if err != nil { return fmt.Errorf("%s %s profile: %w", configFile.Path(), match.Name(), err) } @@ -110,7 +113,7 @@ func (l profileFromHostLoader) Configure(cfg *config.Config) error { func (l profileFromHostLoader) isAnyAuthConfigured(cfg *config.Config) bool { // If any of the auth-specific attributes are set, we can skip profile resolution. 
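The switch from `os.IsNotExist` to `errors.Is(err, fs.ErrNotExist)` in the hunks above matters once `config.LoadFile` returns wrapped errors: `os.IsNotExist` only inspects the concrete error value, while `errors.Is` walks the wrap chain. A small illustrative sketch; the wrapping shown here is an assumption for demonstration, not something this patch asserts about the SDK.

```go
// errors.Is unwraps wrapped errors; os.IsNotExist does not.
base := &fs.PathError{Op: "open", Path: "/tmp/.databrickscfg", Err: fs.ErrNotExist}
wrapped := fmt.Errorf("cannot load config: %w", base)

fmt.Println(os.IsNotExist(wrapped))             // false: the wrapper hides the PathError
fmt.Println(errors.Is(wrapped, fs.ErrNotExist)) // true: the wrap chain is traversed
```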
for _, a := range config.ConfigAttributes { - if a.Auth == "" { + if !a.HasAuthAttribute() { continue } if !a.IsZero(cfg) { diff --git a/libs/databrickscfg/loader_test.go b/libs/databrickscfg/loader_test.go index 4525115e0..c42fcdbdd 100644 --- a/libs/databrickscfg/loader_test.go +++ b/libs/databrickscfg/loader_test.go @@ -68,7 +68,7 @@ func TestLoaderErrorsOnInvalidFile(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/badcfg", + ConfigFile: "profile/testdata/badcfg", Host: "https://default", } @@ -81,7 +81,7 @@ func TestLoaderSkipsNoMatchingHost(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://noneofthehostsmatch", } @@ -95,7 +95,7 @@ func TestLoaderMatchingHost(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://default", } @@ -110,7 +110,7 @@ func TestLoaderMatchingHostWithQuery(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://query/?foo=bar", } @@ -125,7 +125,7 @@ func TestLoaderErrorsOnMultipleMatches(t *testing.T) { Loaders: []config.Loader{ ResolveProfileFromHost, }, - ConfigFile: "testdata/databrickscfg", + ConfigFile: "profile/testdata/databrickscfg", Host: "https://foo/bar", } diff --git a/libs/databrickscfg/ops.go b/libs/databrickscfg/ops.go index 90795afd5..6a1c182af 100644 --- a/libs/databrickscfg/ops.go +++ b/libs/databrickscfg/ops.go @@ -2,7 +2,9 @@ package databrickscfg import ( "context" + "errors" "fmt" + "io/fs" "os" "strings" @@ -29,7 +31,7 @@ func loadOrCreateConfigFile(filename string) (*config.File, error) { filename = fmt.Sprintf("%s%s", homedir, filename[1:]) } configFile, err := config.LoadFile(filename) - if err != nil && os.IsNotExist(err) { + if err != nil && errors.Is(err, fs.ErrNotExist) { file, err := os.OpenFile(filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, fileMode) if err != nil { return nil, fmt.Errorf("create %s: %w", filename, err) diff --git a/libs/databrickscfg/ops_test.go b/libs/databrickscfg/ops_test.go index 233555fe2..3ea92024c 100644 --- a/libs/databrickscfg/ops_test.go +++ b/libs/databrickscfg/ops_test.go @@ -30,7 +30,7 @@ func TestLoadOrCreate_NotAllowed(t *testing.T) { } func TestLoadOrCreate_Bad(t *testing.T) { - path := "testdata/badcfg" + path := "profile/testdata/badcfg" file, err := loadOrCreateConfigFile(path) assert.Error(t, err) assert.Nil(t, file) @@ -40,7 +40,7 @@ func TestMatchOrCreateSection_Direct(t *testing.T) { cfg := &config.Config{ Profile: "query", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -54,7 +54,7 @@ func TestMatchOrCreateSection_AccountID(t *testing.T) { cfg := &config.Config{ AccountID: "abc", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -68,7 +68,7 @@ func TestMatchOrCreateSection_NormalizeHost(t *testing.T) { cfg := &config.Config{ Host: "https://query/?o=abracadabra", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, 
err) ctx := context.Background() @@ -80,7 +80,7 @@ func TestMatchOrCreateSection_NormalizeHost(t *testing.T) { func TestMatchOrCreateSection_NoProfileOrHost(t *testing.T) { cfg := &config.Config{} - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -92,7 +92,7 @@ func TestMatchOrCreateSection_MultipleProfiles(t *testing.T) { cfg := &config.Config{ Host: "https://foo", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() @@ -105,7 +105,7 @@ func TestMatchOrCreateSection_NewProfile(t *testing.T) { Host: "https://bar", Profile: "delirium", } - file, err := loadOrCreateConfigFile("testdata/databrickscfg") + file, err := loadOrCreateConfigFile("profile/testdata/databrickscfg") assert.NoError(t, err) ctx := context.Background() diff --git a/libs/databrickscfg/profile/context.go b/libs/databrickscfg/profile/context.go new file mode 100644 index 000000000..fa4d2ad8a --- /dev/null +++ b/libs/databrickscfg/profile/context.go @@ -0,0 +1,17 @@ +package profile + +import "context" + +var profiler int + +func WithProfiler(ctx context.Context, p Profiler) context.Context { + return context.WithValue(ctx, &profiler, p) +} + +func GetProfiler(ctx context.Context) Profiler { + p, ok := ctx.Value(&profiler).(Profiler) + if !ok { + return DefaultProfiler + } + return p +} diff --git a/libs/databrickscfg/profile/file.go b/libs/databrickscfg/profile/file.go new file mode 100644 index 000000000..1b743014e --- /dev/null +++ b/libs/databrickscfg/profile/file.go @@ -0,0 +1,100 @@ +package profile + +import ( + "context" + "errors" + "fmt" + "io/fs" + "path/filepath" + "strings" + + "github.com/databricks/cli/libs/env" + "github.com/databricks/databricks-sdk-go/config" + "github.com/spf13/cobra" +) + +type FileProfilerImpl struct{} + +func (f FileProfilerImpl) getPath(ctx context.Context, replaceHomeDirWithTilde bool) (string, error) { + configFile := env.Get(ctx, "DATABRICKS_CONFIG_FILE") + if configFile == "" { + configFile = "~/.databrickscfg" + } + if !replaceHomeDirWithTilde { + return configFile, nil + } + homedir, err := env.UserHomeDir(ctx) + if err != nil { + return "", err + } + configFile = strings.Replace(configFile, homedir, "~", 1) + return configFile, nil +} + +// Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. +func (f FileProfilerImpl) GetPath(ctx context.Context) (string, error) { + fp, err := f.getPath(ctx, true) + if err != nil { + return "", err + } + return filepath.Clean(fp), nil +} + +var ErrNoConfiguration = errors.New("no configuration file found") + +func (f FileProfilerImpl) Get(ctx context.Context) (*config.File, error) { + path, err := f.getPath(ctx, false) + if err != nil { + return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) + } + if strings.HasPrefix(path, "~") { + homedir, err := env.UserHomeDir(ctx) + if err != nil { + return nil, err + } + path = filepath.Join(homedir, path[1:]) + } + configFile, err := config.LoadFile(path) + if errors.Is(err, fs.ErrNotExist) { + // downstreams depend on ErrNoConfiguration. 
TODO: expose this error through SDK + return nil, fmt.Errorf("%w at %s; please create one by running 'databricks configure'", ErrNoConfiguration, path) + } else if err != nil { + return nil, err + } + return configFile, nil +} + +func (f FileProfilerImpl) LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (profiles Profiles, err error) { + file, err := f.Get(ctx) + if err != nil { + return nil, fmt.Errorf("cannot load Databricks config file: %w", err) + } + + // Iterate over sections and collect matching profiles. + for _, v := range file.Sections() { + all := v.KeysHash() + host, ok := all["host"] + if !ok { + // invalid profile + continue + } + profile := Profile{ + Name: v.Name(), + Host: host, + AccountID: all["account_id"], + } + if fn(profile) { + profiles = append(profiles, profile) + } + } + + return +} + +func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { + profiles, err := DefaultProfiler.LoadProfiles(cmd.Context(), MatchAllProfiles) + if err != nil { + return nil, cobra.ShellCompDirectiveError + } + return profiles.Names(), cobra.ShellCompDirectiveNoFileComp +} diff --git a/libs/databrickscfg/profiles_test.go b/libs/databrickscfg/profile/file_test.go similarity index 82% rename from libs/databrickscfg/profiles_test.go rename to libs/databrickscfg/profile/file_test.go index 33a5c9dfd..8e5cfefc0 100644 --- a/libs/databrickscfg/profiles_test.go +++ b/libs/databrickscfg/profile/file_test.go @@ -1,4 +1,4 @@ -package databrickscfg +package profile import ( "context" @@ -32,7 +32,8 @@ func TestLoadProfilesReturnsHomedirAsTilde(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/databrickscfg"), file) } @@ -41,7 +42,8 @@ func TestLoadProfilesReturnsHomedirAsTildeExoticFile(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "~/databrickscfg") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/databrickscfg"), file) } @@ -49,7 +51,8 @@ func TestLoadProfilesReturnsHomedirAsTildeExoticFile(t *testing.T) { func TestLoadProfilesReturnsHomedirAsTildeDefaultFile(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata/sample-home") - file, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + file, err := profiler.GetPath(ctx) require.NoError(t, err) require.Equal(t, filepath.Clean("~/.databrickscfg"), file) } @@ -57,14 +60,16 @@ func TestLoadProfilesReturnsHomedirAsTildeDefaultFile(t *testing.T) { func TestLoadProfilesNoConfiguration(t *testing.T) { ctx := context.Background() ctx = env.WithUserHomeDir(ctx, "testdata") - _, _, err := LoadProfiles(ctx, func(p Profile) bool { return true }) + profiler := FileProfilerImpl{} + _, err := profiler.LoadProfiles(ctx, MatchAllProfiles) require.ErrorIs(t, err, ErrNoConfiguration) } func TestLoadProfilesMatchWorkspace(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(ctx, 
MatchWorkspaceProfiles) + profiler := FileProfilerImpl{} + profiles, err := profiler.LoadProfiles(ctx, MatchWorkspaceProfiles) require.NoError(t, err) assert.Equal(t, []string{"DEFAULT", "query", "foo1", "foo2"}, profiles.Names()) } @@ -72,7 +77,8 @@ func TestLoadProfilesMatchWorkspace(t *testing.T) { func TestLoadProfilesMatchAccount(t *testing.T) { ctx := context.Background() ctx = env.Set(ctx, "DATABRICKS_CONFIG_FILE", "./testdata/databrickscfg") - _, profiles, err := LoadProfiles(ctx, MatchAccountProfiles) + profiler := FileProfilerImpl{} + profiles, err := profiler.LoadProfiles(ctx, MatchAccountProfiles) require.NoError(t, err) assert.Equal(t, []string{"acc"}, profiles.Names()) } diff --git a/libs/databrickscfg/profile/in_memory.go b/libs/databrickscfg/profile/in_memory.go new file mode 100644 index 000000000..902ae42e6 --- /dev/null +++ b/libs/databrickscfg/profile/in_memory.go @@ -0,0 +1,25 @@ +package profile + +import "context" + +type InMemoryProfiler struct { + Profiles Profiles +} + +// GetPath implements Profiler. +func (i InMemoryProfiler) GetPath(context.Context) (string, error) { + return "", nil +} + +// LoadProfiles implements Profiler. +func (i InMemoryProfiler) LoadProfiles(ctx context.Context, f ProfileMatchFunction) (Profiles, error) { + res := make(Profiles, 0) + for _, p := range i.Profiles { + if f(p) { + res = append(res, p) + } + } + return res, nil +} + +var _ Profiler = InMemoryProfiler{} diff --git a/libs/databrickscfg/profile/profile.go b/libs/databrickscfg/profile/profile.go new file mode 100644 index 000000000..510e5c9e5 --- /dev/null +++ b/libs/databrickscfg/profile/profile.go @@ -0,0 +1,49 @@ +package profile + +import ( + "strings" + + "github.com/databricks/databricks-sdk-go/config" +) + +// Profile holds a subset of the keys in a databrickscfg profile. +// It should only be used for prompting and filtering. +// Use its name to construct a config.Config. +type Profile struct { + Name string + Host string + AccountID string +} + +func (p Profile) Cloud() string { + cfg := config.Config{Host: p.Host} + switch { + case cfg.IsAws(): + return "AWS" + case cfg.IsAzure(): + return "Azure" + case cfg.IsGcp(): + return "GCP" + default: + return "" + } +} + +type Profiles []Profile + +// SearchCaseInsensitive implements the promptui.Searcher interface. +// This allows the user to immediately starting typing to narrow down the list. 
+func (p Profiles) SearchCaseInsensitive(input string, index int) bool { + input = strings.ToLower(input) + name := strings.ToLower(p[index].Name) + host := strings.ToLower(p[index].Host) + return strings.Contains(name, input) || strings.Contains(host, input) +} + +func (p Profiles) Names() []string { + names := make([]string, len(p)) + for i, v := range p { + names[i] = v.Name + } + return names +} diff --git a/libs/databrickscfg/profile/profiler.go b/libs/databrickscfg/profile/profiler.go new file mode 100644 index 000000000..c0a549256 --- /dev/null +++ b/libs/databrickscfg/profile/profiler.go @@ -0,0 +1,32 @@ +package profile + +import ( + "context" +) + +type ProfileMatchFunction func(Profile) bool + +func MatchWorkspaceProfiles(p Profile) bool { + return p.AccountID == "" +} + +func MatchAccountProfiles(p Profile) bool { + return p.Host != "" && p.AccountID != "" +} + +func MatchAllProfiles(p Profile) bool { + return true +} + +func WithName(name string) ProfileMatchFunction { + return func(p Profile) bool { + return p.Name == name + } +} + +type Profiler interface { + LoadProfiles(context.Context, ProfileMatchFunction) (Profiles, error) + GetPath(context.Context) (string, error) +} + +var DefaultProfiler = FileProfilerImpl{} diff --git a/libs/databrickscfg/testdata/badcfg b/libs/databrickscfg/profile/testdata/badcfg similarity index 100% rename from libs/databrickscfg/testdata/badcfg rename to libs/databrickscfg/profile/testdata/badcfg diff --git a/libs/databrickscfg/testdata/databrickscfg b/libs/databrickscfg/profile/testdata/databrickscfg similarity index 100% rename from libs/databrickscfg/testdata/databrickscfg rename to libs/databrickscfg/profile/testdata/databrickscfg diff --git a/libs/databrickscfg/testdata/sample-home/.databrickscfg b/libs/databrickscfg/profile/testdata/sample-home/.databrickscfg similarity index 100% rename from libs/databrickscfg/testdata/sample-home/.databrickscfg rename to libs/databrickscfg/profile/testdata/sample-home/.databrickscfg diff --git a/libs/databrickscfg/profiles.go b/libs/databrickscfg/profiles.go deleted file mode 100644 index c7bb27195..000000000 --- a/libs/databrickscfg/profiles.go +++ /dev/null @@ -1,150 +0,0 @@ -package databrickscfg - -import ( - "context" - "errors" - "fmt" - "io/fs" - "path/filepath" - "strings" - - "github.com/databricks/cli/libs/env" - "github.com/databricks/databricks-sdk-go/config" - "github.com/spf13/cobra" -) - -// Profile holds a subset of the keys in a databrickscfg profile. -// It should only be used for prompting and filtering. -// Use its name to construct a config.Config. -type Profile struct { - Name string - Host string - AccountID string -} - -func (p Profile) Cloud() string { - cfg := config.Config{Host: p.Host} - switch { - case cfg.IsAws(): - return "AWS" - case cfg.IsAzure(): - return "Azure" - case cfg.IsGcp(): - return "GCP" - default: - return "" - } -} - -type Profiles []Profile - -func (p Profiles) Names() []string { - names := make([]string, len(p)) - for i, v := range p { - names[i] = v.Name - } - return names -} - -// SearchCaseInsensitive implements the promptui.Searcher interface. -// This allows the user to immediately starting typing to narrow down the list. 
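The new `Profiler` interface above, together with `WithProfiler`/`GetProfiler` from context.go, makes profile resolution injectable. A hedged sketch of how a test might swap in the in-memory implementation; the profile values are invented for illustration, while the types and functions come from the new `profile` package.

```go
// Resolve profiles from an in-memory fixture instead of ~/.databrickscfg.
ctx := profile.WithProfiler(context.Background(), profile.InMemoryProfiler{
	Profiles: profile.Profiles{
		{Name: "DEFAULT", Host: "https://adb-123.azuredatabricks.net"},
		{Name: "acc", Host: "https://accounts.cloud.databricks.com", AccountID: "abc"},
	},
})

// Anything that calls GetProfiler(ctx) now sees the fixture.
accounts, err := profile.GetProfiler(ctx).LoadProfiles(ctx, profile.MatchAccountProfiles)
if err == nil {
	fmt.Println(accounts.Names()) // [acc]
}
```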
-func (p Profiles) SearchCaseInsensitive(input string, index int) bool { - input = strings.ToLower(input) - name := strings.ToLower(p[index].Name) - host := strings.ToLower(p[index].Host) - return strings.Contains(name, input) || strings.Contains(host, input) -} - -type ProfileMatchFunction func(Profile) bool - -func MatchWorkspaceProfiles(p Profile) bool { - return p.AccountID == "" -} - -func MatchAccountProfiles(p Profile) bool { - return p.Host != "" && p.AccountID != "" -} - -func MatchAllProfiles(p Profile) bool { - return true -} - -// Get the path to the .databrickscfg file, falling back to the default in the current user's home directory. -func GetPath(ctx context.Context) (string, error) { - configFile := env.Get(ctx, "DATABRICKS_CONFIG_FILE") - if configFile == "" { - configFile = "~/.databrickscfg" - } - if strings.HasPrefix(configFile, "~") { - homedir, err := env.UserHomeDir(ctx) - if err != nil { - return "", err - } - configFile = filepath.Join(homedir, configFile[1:]) - } - return configFile, nil -} - -var ErrNoConfiguration = errors.New("no configuration file found") - -func Get(ctx context.Context) (*config.File, error) { - path, err := GetPath(ctx) - if err != nil { - return nil, fmt.Errorf("cannot determine Databricks config file path: %w", err) - } - configFile, err := config.LoadFile(path) - if errors.Is(err, fs.ErrNotExist) { - // downstreams depend on ErrNoConfiguration. TODO: expose this error through SDK - return nil, fmt.Errorf("%w at %s; please create one first", ErrNoConfiguration, path) - } else if err != nil { - return nil, err - } - return configFile, nil -} - -func LoadProfiles(ctx context.Context, fn ProfileMatchFunction) (file string, profiles Profiles, err error) { - f, err := Get(ctx) - if err != nil { - return "", nil, fmt.Errorf("cannot load Databricks config file: %w", err) - } - - // Replace homedir with ~ if applicable. - // This is to make the output more readable. - file = filepath.Clean(f.Path()) - home, err := env.UserHomeDir(ctx) - if err != nil { - return "", nil, err - } - homedir := filepath.Clean(home) - if strings.HasPrefix(file, homedir) { - file = "~" + file[len(homedir):] - } - - // Iterate over sections and collect matching profiles. - for _, v := range f.Sections() { - all := v.KeysHash() - host, ok := all["host"] - if !ok { - // invalid profile - continue - } - profile := Profile{ - Name: v.Name(), - Host: host, - AccountID: all["account_id"], - } - if fn(profile) { - profiles = append(profiles, profile) - } - } - - return -} - -func ProfileCompletion(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) { - _, profiles, err := LoadProfiles(cmd.Context(), MatchAllProfiles) - if err != nil { - return nil, cobra.ShellCompDirectiveError - } - return profiles.Names(), cobra.ShellCompDirectiveNoFileComp -} diff --git a/libs/diag/diagnostic.go b/libs/diag/diagnostic.go index 02d2e7c17..621527551 100644 --- a/libs/diag/diagnostic.go +++ b/libs/diag/diagnostic.go @@ -20,6 +20,10 @@ type Diagnostic struct { // Location is a source code location associated with the diagnostic message. // It may be zero if there is no associated location. Location dyn.Location + + // Path is a path to the value in a configuration tree that the diagnostic is associated with. + // It may be nil if there is no associated path. + Path dyn.Path } // Errorf creates a new error diagnostic. 
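With the `Path` field added above, a diagnostic can point at both a file location and a position in the configuration tree. A small illustrative sketch; the summary text and path are made up, and `dyn.Location`, `dyn.Key`, and `dyn.EmptyPath` are used the same way as elsewhere in this patch.

```go
// A warning that carries both a source location and a config-tree path.
d := diag.Diagnostic{
	Severity: diag.Warning,
	Summary:  "unknown field: foo",
	Location: dyn.Location{File: "databricks.yml"},
	Path:     dyn.EmptyPath.Append(dyn.Key("resources")).Append(dyn.Key("jobs")),
}
_ = d
```

The helpers added in the next hunk (`FromErr`, `Error`, `Filter`) build on this same struct.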
@@ -32,6 +36,19 @@ func Errorf(format string, args ...any) Diagnostics { } } +// FromErr returns a new error diagnostic from the specified error, if any. +func FromErr(err error) Diagnostics { + if err == nil { + return nil + } + return []Diagnostic{ + { + Severity: Error, + Summary: err.Error(), + }, + } +} + // Warningf creates a new warning diagnostic. func Warningf(format string, args ...any) Diagnostics { return []Diagnostic{ @@ -74,3 +91,24 @@ func (ds Diagnostics) HasError() bool { } return false } + +// Return first error in the set of diagnostics. +func (ds Diagnostics) Error() error { + for _, d := range ds { + if d.Severity == Error { + return fmt.Errorf(d.Summary) + } + } + return nil +} + +// Filter returns a new list of diagnostics that match the specified severity. +func (ds Diagnostics) Filter(severity Severity) Diagnostics { + var out Diagnostics + for _, d := range ds { + if d.Severity == severity { + out = append(out, d) + } + } + return out +} diff --git a/libs/dyn/convert/end_to_end_test.go b/libs/dyn/convert/end_to_end_test.go index 7c048136e..f0e428a69 100644 --- a/libs/dyn/convert/end_to_end_test.go +++ b/libs/dyn/convert/end_to_end_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) @@ -67,4 +67,49 @@ func TestAdditional(t *testing.T) { SliceOfPointer: []*string{nil}, }) }) + + t.Run("pointer to a empty string", func(t *testing.T) { + s := "" + assertFromTypedToTypedEqual(t, &s) + }) + + t.Run("nil pointer", func(t *testing.T) { + var s *string + assertFromTypedToTypedEqual(t, s) + }) + + t.Run("pointer to struct with scalar values", func(t *testing.T) { + s := "" + type foo struct { + A string `json:"a"` + B int `json:"b"` + C bool `json:"c"` + D *string `json:"d"` + } + assertFromTypedToTypedEqual(t, &foo{ + A: "a", + B: 1, + C: true, + D: &s, + }) + assertFromTypedToTypedEqual(t, &foo{ + A: "", + B: 0, + C: false, + D: nil, + }) + }) + + t.Run("map with scalar values", func(t *testing.T) { + assertFromTypedToTypedEqual(t, map[string]string{ + "a": "a", + "b": "b", + "c": "", + }) + assertFromTypedToTypedEqual(t, map[string]int{ + "a": 1, + "b": 0, + "c": 2, + }) + }) } diff --git a/libs/dyn/convert/from_typed.go b/libs/dyn/convert/from_typed.go index 75f1c7212..cd92ad0eb 100644 --- a/libs/dyn/convert/from_typed.go +++ b/libs/dyn/convert/from_typed.go @@ -6,25 +6,30 @@ import ( "slices" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) type fromTypedOptions int const ( - // Use the zero value instead of setting zero values to nil. This is useful - // for types where the zero values and nil are semantically different. That is - // strings, bools, ints, floats. + // If this flag is set, zero values in the typed representation are resolved to + // the equivalent zero value in the dynamic representation. + // If it is not set, zero values resolve to [dyn.NilValue]. // - // Note: this is not needed for structs because dyn.NilValue is converted back - // to a zero value when using the convert.ToTyped function. - // - // Values in maps and slices should be set to zero values, and not nil in the - // dynamic representation. + // This flag exists to reconcile default values in Go being zero values with values + // being intentionally set to their zero value. 
We capture zero values in the dynamic + // configuration if they are 1) behind a pointer, 2) a map value, 3) a slice element, + // in the typed configuration. includeZeroValues fromTypedOptions = 1 << iota ) // FromTyped converts changes made in the typed structure w.r.t. the configuration value // back to the configuration value, retaining existing location information where possible. +// +// It uses the reference value both for location information and to determine if the typed +// value was changed or not. For example, if a struct-by-value field is nil in the reference +// it will be zero-valued in the typed configuration. If it remains zero-valued, this +// this function will still emit a nil value in the dynamic representation. func FromTyped(src any, ref dyn.Value) (dyn.Value, error) { return fromTyped(src, ref) } @@ -37,67 +42,117 @@ func fromTyped(src any, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, // Dereference pointer if necessary for srcv.Kind() == reflect.Pointer { if srcv.IsNil() { - return dyn.NilValue, nil + return dyn.NilValue.WithLocations(ref.Locations()), nil } srcv = srcv.Elem() + + // If a pointer to a type points to a zero value, we should include + // that zero value in the dynamic representation. + // This is because by default a pointer is nil in Go, and it not being nil + // indicates its value was intentionally set to zero. + if !slices.Contains(options, includeZeroValues) { + options = append(options, includeZeroValues) + } } + var v dyn.Value + var err error switch srcv.Kind() { case reflect.Struct: - return fromTypedStruct(srcv, ref) + v, err = fromTypedStruct(srcv, ref, options...) case reflect.Map: - return fromTypedMap(srcv, ref) + v, err = fromTypedMap(srcv, ref) case reflect.Slice: - return fromTypedSlice(srcv, ref) + v, err = fromTypedSlice(srcv, ref) case reflect.String: - return fromTypedString(srcv, ref, options...) + v, err = fromTypedString(srcv, ref, options...) case reflect.Bool: - return fromTypedBool(srcv, ref, options...) + v, err = fromTypedBool(srcv, ref, options...) case reflect.Int, reflect.Int32, reflect.Int64: - return fromTypedInt(srcv, ref, options...) + v, err = fromTypedInt(srcv, ref, options...) case reflect.Float32, reflect.Float64: - return fromTypedFloat(srcv, ref, options...) + v, err = fromTypedFloat(srcv, ref, options...) + case reflect.Invalid: + // If the value is untyped and not set (e.g. any type with nil value), we return nil. + v, err = dyn.NilValue, nil + default: + return dyn.InvalidValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) } - return dyn.NilValue, fmt.Errorf("unsupported type: %s", srcv.Kind()) + // Ensure the location metadata is retained. + if err != nil { + return dyn.InvalidValue, err + } + return v.WithLocations(ref.Locations()), err } -func fromTypedStruct(src reflect.Value, ref dyn.Value) (dyn.Value, error) { +func fromTypedStruct(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindMap, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } - out := make(map[string]dyn.Value) + refm, _ := ref.AsMap() + out := dyn.NewMapping() info := getStructInfo(src.Type()) for k, v := range info.FieldValues(src) { + pair, ok := refm.GetPairByString(k) + refk := pair.Key + refv := pair.Value + + // Use nil reference if there is no reference for this key + if !ok { + refk = dyn.V(k) + refv = dyn.NilValue + } + + var options []fromTypedOptions + if v.Kind() == reflect.Interface { + options = append(options, includeZeroValues) + } + // Convert the field taking into account the reference value (may be equal to config.NilValue). - nv, err := fromTyped(v.Interface(), ref.Get(k)) + nv, err := fromTyped(v.Interface(), refv, options...) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } - if nv != dyn.NilValue { - out[k] = nv + // Either if the key was set in the reference or the field is not zero-valued, we include it. + if ok || nv.Kind() != dyn.KindNil { + out.Set(refk, nv) } } - // If the struct was equal to its zero value, emit a nil. - if len(out) == 0 { - return dyn.NilValue, nil + // Return the new mapping if: + // 1. The mapping has entries (i.e. the struct was not empty). + // 2. The reference is a map (i.e. the struct was and still is empty). + // 3. The "includeZeroValues" option is set (i.e. the struct is a non-nil pointer). + if out.Len() > 0 || ref.Kind() == dyn.KindMap || slices.Contains(options, includeZeroValues) { + return dyn.V(out), nil } - return dyn.NewValue(out, ref.Location()), nil + // Otherwise, return nil. + return dyn.NilValue, nil } func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindMap, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the map is nil. @@ -105,32 +160,47 @@ func fromTypedMap(src reflect.Value, ref dyn.Value) (dyn.Value, error) { return dyn.NilValue, nil } - out := make(map[string]dyn.Value) + refm, _ := ref.AsMap() + out := dyn.NewMapping() iter := src.MapRange() for iter.Next() { k := iter.Key().String() v := iter.Value() + pair, ok := refm.GetPairByString(k) + refk := pair.Key + refv := pair.Value + + // Use nil reference if there is no reference for this key + if !ok { + refk = dyn.V(k) + refv = dyn.NilValue + } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), ref.Get(k), includeZeroValues) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } // Every entry is represented, even if it is a nil. // Otherwise, a map with zero-valued structs would yield a nil as well. - out[k] = nv + out.Set(refk, nv) } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { // Check that the reference value is compatible or nil. 
switch ref.Kind() { + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } case dyn.KindSequence, dyn.KindNil: default: - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } // Return nil if the slice is nil. @@ -141,17 +211,23 @@ func fromTypedSlice(src reflect.Value, ref dyn.Value) (dyn.Value, error) { out := make([]dyn.Value, src.Len()) for i := 0; i < src.Len(); i++ { v := src.Index(i) + refv := ref.Index(i) + + // Use nil reference if there is no reference for this index. + if refv.Kind() == dyn.KindInvalid { + refv = dyn.NilValue + } // Convert entry taking into account the reference value (may be equal to dyn.NilValue). - nv, err := fromTyped(v.Interface(), ref.Index(i), includeZeroValues) + nv, err := fromTyped(v.Interface(), refv, includeZeroValues) if err != nil { - return dyn.Value{}, err + return dyn.InvalidValue, err } out[i] = nv } - return dyn.NewValue(out, ref.Location()), nil + return dyn.V(out), nil } func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -172,7 +248,7 @@ func fromTypedString(src reflect.Value, ref dyn.Value, options ...fromTypedOptio return dyn.V(src.String()), nil } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -190,9 +266,14 @@ func fromTypedBool(src reflect.Value, ref dyn.Value, options ...fromTypedOptions return dyn.NilValue, nil } return dyn.V(src.Bool()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -210,9 +291,14 @@ func fromTypedInt(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) return dyn.NilValue, nil } return dyn.V(src.Int()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOptions) (dyn.Value, error) { @@ -230,7 +316,12 @@ func fromTypedFloat(src reflect.Value, ref dyn.Value, options ...fromTypedOption return dyn.NilValue, nil } return dyn.V(src.Float()), nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). 
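The `dynvar.IsPureVariableReference` checks added across the `fromTyped*` helpers all implement the same rule: if the dynamic reference is a pure `${...}` reference, return it verbatim instead of reporting a type mismatch. A sketch of the observable behavior, using a hypothetical config struct:

```go
// A bool field whose dynamic counterpart is a pure variable reference
// survives FromTyped untouched.
type cfg struct {
	Enabled bool `json:"enabled"`
}

ref := dyn.V(map[string]dyn.Value{"enabled": dyn.V("${var.flag}")})
nv, err := convert.FromTyped(cfg{Enabled: true}, ref)
if err == nil {
	fmt.Println(nv.Get("enabled").MustString()) // ${var.flag}
}
```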
+ if dynvar.IsPureVariableReference(ref.MustString()) { + return ref, nil + } } - return dyn.Value{}, fmt.Errorf("unhandled type: %s", ref.Kind()) + return dyn.InvalidValue, fmt.Errorf("unhandled type: %s", ref.Kind()) } diff --git a/libs/dyn/convert/from_typed_test.go b/libs/dyn/convert/from_typed_test.go index d7fa60bb3..0cddff3be 100644 --- a/libs/dyn/convert/from_typed_test.go +++ b/libs/dyn/convert/from_typed_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) @@ -15,11 +15,72 @@ func TestFromTypedStructZeroFields(t *testing.T) { } src := Tmp{} - ref := dyn.NilValue + + // For an empty struct with a nil reference we expect a nil. + nv, err := FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an empty struct with a non-nil reference we expect an empty map. + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) +} + +func TestFromTypedStructPointerZeroFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var src *Tmp + var nv dyn.Value + var err error + + // For a nil pointer with a nil reference we expect a nil. + src = nil + nv, err = FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For a nil pointer with a non-nil reference we expect a nil. + src = nil + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + + // For an initialized pointer with a nil reference we expect an empty map. + src = &Tmp{} + nv, err = FromTyped(src, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) + + // For an initialized pointer with a non-nil reference we expect an empty map. + src = &Tmp{} + nv, err = FromTyped(src, dyn.V(map[string]dyn.Value{})) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{}), nv) +} + +func TestFromTypedStructNilFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + // For a zero value struct with a reference containing nil fields we expect the nils to be retained. + src := Tmp{} + ref := dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NilValue, nv) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.NilValue, + "bar": dyn.NilValue, + }), nv) } func TestFromTypedStructSetFields(t *testing.T) { @@ -42,7 +103,7 @@ func TestFromTypedStructSetFields(t *testing.T) { }), nv) } -func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedStructSetFieldsRetainLocation(t *testing.T) { type Tmp struct { Foo string `json:"foo"` Bar string `json:"bar"` @@ -54,18 +115,16 @@ func TestFromTypedStructSetFieldsRetainLocationIfUnchanged(t *testing.T) { } ref := dyn.V(map[string]dyn.Value{ - "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), - "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), + "foo": dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), + "bar": dyn.NewValue("baz", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. 
- assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + // Assert foo and bar have retained their location. + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("qux", []dyn.Location{{File: "bar"}}), nv.Get("bar")) } func TestFromTypedStringMapWithZeroValue(t *testing.T) { @@ -293,25 +352,23 @@ func TestFromTypedMapNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedMapNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedMapNonEmptyRetainLocation(t *testing.T) { var src = map[string]string{ "foo": "bar", "bar": "qux", } ref := dyn.V(map[string]dyn.Value{ - "foo": dyn.NewValue("bar", dyn.Location{File: "foo"}), - "bar": dyn.NewValue("baz", dyn.Location{File: "bar"}), + "foo": dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), + "bar": dyn.NewValue("baz", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. - assert.Equal(t, dyn.NewValue("bar", dyn.Location{File: "foo"}), nv.Get("foo")) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("qux", dyn.Location{}), nv.Get("bar")) + // Assert foo and bar have retained their locations. + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv.Get("foo")) + assert.Equal(t, dyn.NewValue("qux", []dyn.Location{{File: "bar"}}), nv.Get("bar")) } func TestFromTypedMapFieldWithZeroValue(t *testing.T) { @@ -368,25 +425,23 @@ func TestFromTypedSliceNonEmpty(t *testing.T) { }), nv) } -func TestFromTypedSliceNonEmptyRetainLocationIfUnchanged(t *testing.T) { +func TestFromTypedSliceNonEmptyRetainLocation(t *testing.T) { var src = []string{ "foo", "bar", } ref := dyn.V([]dyn.Value{ - dyn.NewValue("foo", dyn.Location{File: "foo"}), - dyn.NewValue("baz", dyn.Location{File: "baz"}), + dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), + dyn.NewValue("bar", []dyn.Location{{File: "bar"}}), }) nv, err := FromTyped(src, ref) require.NoError(t, err) - // Assert foo has retained its location. - assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv.Index(0)) - - // Assert bar lost its location (because it was overwritten). - assert.Equal(t, dyn.NewValue("bar", dyn.Location{}), nv.Index(1)) + // Assert foo and bar have retained their locations. 
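The renamed `*RetainLocation` tests around here capture the other behavioral change in this file: locations from the reference are now kept even when the typed value differs. A rough sketch of the effect:

```go
// Changed values inherit the locations of the reference they replace.
ref := dyn.NewValue("dev", []dyn.Location{{File: "databricks.yml"}})
nv, _ := convert.FromTyped("prod", ref)
// nv equals dyn.NewValue("prod", []dyn.Location{{File: "databricks.yml"}})
```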
+ assert.Equal(t, dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), nv.Index(0)) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "bar"}}), nv.Index(1)) } func TestFromTypedStringEmpty(t *testing.T) { @@ -421,12 +476,20 @@ func TestFromTypedStringNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V("new"), nv) } -func TestFromTypedStringRetainsLocationsIfUnchanged(t *testing.T) { +func TestFromTypedStringRetainsLocations(t *testing.T) { + var ref = dyn.NewValue("foo", []dyn.Location{{File: "foo"}}) + + // case: value has not been changed var src string = "foo" - var ref = dyn.NewValue("foo", dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue("foo", dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue("foo", []dyn.Location{{File: "foo"}}), nv) + + // case: value has been changed + src = "bar" + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue("bar", []dyn.Location{{File: "foo"}}), nv) } func TestFromTypedStringTypeError(t *testing.T) { @@ -468,12 +531,28 @@ func TestFromTypedBoolNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(true), nv) } -func TestFromTypedBoolRetainsLocationsIfUnchanged(t *testing.T) { +func TestFromTypedBoolRetainsLocations(t *testing.T) { + var ref = dyn.NewValue(true, []dyn.Location{{File: "foo"}}) + + // case: value has not been changed var src bool = true - var ref = dyn.NewValue(true, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(true, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(true, []dyn.Location{{File: "foo"}}), nv) + + // case: value has been changed + src = false + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "foo"}}), nv) +} + +func TestFromTypedBoolVariableReference(t *testing.T) { + var src bool = true + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) } func TestFromTypedBoolTypeError(t *testing.T) { @@ -515,12 +594,28 @@ func TestFromTypedIntNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(int64(1234)), nv) } -func TestFromTypedIntRetainsLocationsIfUnchanged(t *testing.T) { +func TestFromTypedIntRetainsLocations(t *testing.T) { + var ref = dyn.NewValue(1234, []dyn.Location{{File: "foo"}}) + + // case: value has not been changed var src int = 1234 - var ref = dyn.NewValue(1234, dyn.Location{File: "foo"}) nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1234, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1234, []dyn.Location{{File: "foo"}}), nv) + + // case: value has been changed + src = 1235 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(int64(1235), []dyn.Location{{File: "foo"}}), nv) +} + +func TestFromTypedIntVariableReference(t *testing.T) { + var src int = 1234 + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) } func TestFromTypedIntTypeError(t *testing.T) { @@ -562,12 +657,29 @@ func TestFromTypedFloatNonEmptyOverwrite(t *testing.T) { assert.Equal(t, dyn.V(1.23), nv) } -func TestFromTypedFloatRetainsLocationsIfUnchanged(t *testing.T) { - var src float64 = 1.23 - var ref = dyn.NewValue(1.23, dyn.Location{File: "foo"}) +func TestFromTypedFloatRetainsLocations(t *testing.T) { + var src float64 + var 
ref = dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}) + + // case: value has not been changed + src = 1.23 nv, err := FromTyped(src, ref) require.NoError(t, err) - assert.Equal(t, dyn.NewValue(1.23, dyn.Location{File: "foo"}), nv) + assert.Equal(t, dyn.NewValue(1.23, []dyn.Location{{File: "foo"}}), nv) + + // case: value has been changed + src = 1.24 + nv, err = FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(1.24, []dyn.Location{{File: "foo"}}), nv) +} + +func TestFromTypedFloatVariableReference(t *testing.T) { + var src float64 = 1.23 + var ref = dyn.V("${var.foo}") + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V("${var.foo}"), nv) } func TestFromTypedFloatTypeError(t *testing.T) { @@ -576,3 +688,79 @@ func TestFromTypedFloatTypeError(t *testing.T) { _, err := FromTyped(src, ref) require.Error(t, err) } + +func TestFromTypedAny(t *testing.T) { + type Tmp struct { + Foo any `json:"foo"` + Bar any `json:"bar"` + Foz any `json:"foz"` + Baz any `json:"baz"` + } + + src := Tmp{ + Foo: "foo", + Bar: false, + Foz: 0, + Baz: map[string]any{ + "foo": "foo", + "bar": 1234, + "qux": 0, + "nil": nil, + }, + } + + ref := dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(false), + "foz": dyn.V(int64(0)), + "baz": dyn.V(map[string]dyn.Value{ + "foo": dyn.V("foo"), + "bar": dyn.V(int64(1234)), + "qux": dyn.V(int64(0)), + "nil": dyn.V(nil), + }), + }), nv) +} + +func TestFromTypedAnyNil(t *testing.T) { + var src any = nil + var ref = dyn.NilValue + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) +} + +func TestFromTypedNilPointerRetainsLocations(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar"` + } + + var src *Tmp + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) +} + +func TestFromTypedNilMapRetainsLocation(t *testing.T) { + var src map[string]string + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) +} + +func TestFromTypedNilSliceRetainsLocation(t *testing.T) { + var src []string + ref := dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}) + + nv, err := FromTyped(src, ref) + require.NoError(t, err) + assert.Equal(t, dyn.NewValue(nil, []dyn.Location{{File: "foobar"}}), nv) +} diff --git a/libs/dyn/convert/normalize.go b/libs/dyn/convert/normalize.go index 7a652cbc7..246c97eaf 100644 --- a/libs/dyn/convert/normalize.go +++ b/libs/dyn/convert/normalize.go @@ -3,117 +3,222 @@ package convert import ( "fmt" "reflect" + "slices" "strconv" "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) -func Normalize(dst any, src dyn.Value) (dyn.Value, diag.Diagnostics) { - return normalizeType(reflect.TypeOf(dst), src) +// NormalizeOption is the type for options that can be passed to Normalize. +type NormalizeOption int + +const ( + // IncludeMissingFields causes the normalization to include fields that defined on the given + // type but are missing in the source value. They are included with their zero values. 
+ IncludeMissingFields NormalizeOption = iota +) + +type normalizeOptions struct { + includeMissingFields bool } -func normalizeType(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func Normalize(dst any, src dyn.Value, opts ...NormalizeOption) (dyn.Value, diag.Diagnostics) { + var n normalizeOptions + for _, opt := range opts { + switch opt { + case IncludeMissingFields: + n.includeMissingFields = true + } + } + + return n.normalizeType(reflect.TypeOf(dst), src, []reflect.Type{}, dyn.EmptyPath) +} + +func (n normalizeOptions) normalizeType(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { for typ.Kind() == reflect.Pointer { typ = typ.Elem() } switch typ.Kind() { case reflect.Struct: - return normalizeStruct(typ, src) + return n.normalizeStruct(typ, src, append(seen, typ), path) case reflect.Map: - return normalizeMap(typ, src) + return n.normalizeMap(typ, src, append(seen, typ), path) case reflect.Slice: - return normalizeSlice(typ, src) + return n.normalizeSlice(typ, src, append(seen, typ), path) case reflect.String: - return normalizeString(typ, src) + return n.normalizeString(typ, src, path) case reflect.Bool: - return normalizeBool(typ, src) + return n.normalizeBool(typ, src, path) case reflect.Int, reflect.Int32, reflect.Int64: - return normalizeInt(typ, src) + return n.normalizeInt(typ, src, path) case reflect.Float32, reflect.Float64: - return normalizeFloat(typ, src) + return n.normalizeFloat(typ, src, path) + case reflect.Interface: + return n.normalizeInterface(typ, src, path) } - return dyn.NilValue, diag.Errorf("unsupported type: %s", typ.Kind()) + return dyn.InvalidValue, diag.Errorf("unsupported type: %s", typ.Kind()) } -func typeMismatch(expected dyn.Kind, src dyn.Value) diag.Diagnostic { +func nullWarning(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnostic { return diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, + Summary: fmt.Sprintf("expected a %s value, found null", expected), + Location: src.Location(), + Path: path, + } +} + +func typeMismatch(expected dyn.Kind, src dyn.Value, path dyn.Path) diag.Diagnostic { + return diag.Diagnostic{ + Severity: diag.Warning, Summary: fmt.Sprintf("expected %s, found %s", expected, src.Kind()), Location: src.Location(), + Path: path, } } -func normalizeStruct(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeStruct(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { case dyn.KindMap: - out := make(map[string]dyn.Value) + out := dyn.NewMapping() info := getStructInfo(typ) - for k, v := range src.MustMap() { - index, ok := info.Fields[k] + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + + index, ok := info.Fields[pk.MustString()] if !ok { - diags = diags.Append(diag.Diagnostic{ - Severity: diag.Warning, - Summary: fmt.Sprintf("unknown field: %s", k), - Location: src.Location(), - }) + if !pv.IsAnchor() { + diags = diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf("unknown field: %s", pk.MustString()), + Location: pk.Location(), + Path: path, + }) + } continue } // Normalize the value according to the field type. 
- v, err := normalizeType(typ.FieldByIndex(index).Type, v) + nv, err := n.normalizeType(typ.FieldByIndex(index).Type, pv, seen, path.Append(dyn.Key(pk.MustString()))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if err.HasError() { + if !nv.IsValid() { continue } } - out[k] = v + out.Set(pk, nv) } - return dyn.NewValue(out, src.Location()), diags + // Return the normalized value if missing fields are not included. + if !n.includeMissingFields { + return dyn.NewValue(out, src.Locations()), diags + } + + // Populate missing fields with their zero values. + for k, index := range info.Fields { + if _, ok := out.GetByString(k); ok { + continue + } + + // Optionally dereference pointers to get the underlying field type. + ftyp := typ.FieldByIndex(index).Type + for ftyp.Kind() == reflect.Pointer { + ftyp = ftyp.Elem() + } + + // Skip field if we have already seen its type to avoid infinite recursion + // when filling in the zero value of a recursive type. + if slices.Contains(seen, ftyp) { + continue + } + + var v dyn.Value + switch ftyp.Kind() { + case reflect.Struct, reflect.Map: + v, _ = n.normalizeType(ftyp, dyn.V(map[string]dyn.Value{}), seen, path.Append(dyn.Key(k))) + case reflect.Slice: + v, _ = n.normalizeType(ftyp, dyn.V([]dyn.Value{}), seen, path.Append(dyn.Key(k))) + case reflect.String: + v, _ = n.normalizeType(ftyp, dyn.V(""), seen, path.Append(dyn.Key(k))) + case reflect.Bool: + v, _ = n.normalizeType(ftyp, dyn.V(false), seen, path.Append(dyn.Key(k))) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v, _ = n.normalizeType(ftyp, dyn.V(int64(0)), seen, path.Append(dyn.Key(k))) + case reflect.Float32, reflect.Float64: + v, _ = n.normalizeType(ftyp, dyn.V(float64(0)), seen, path.Append(dyn.Key(k))) + default: + // Skip fields for which we do not have a natural [dyn.Value] equivalent. + // For example, we don't handle reflect.Complex* and reflect.Uint* types. + continue + } + if v.IsValid() { + out.Set(dyn.V(k), v) + } + } + + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) + // Cannot interpret as a struct. + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } -func normalizeMap(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeMap(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { case dyn.KindMap: - out := make(map[string]dyn.Value) - for k, v := range src.MustMap() { + out := dyn.NewMapping() + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + // Normalize the value according to the map element type. - v, err := normalizeType(typ.Elem(), v) + nv, err := n.normalizeType(typ.Elem(), pv, seen, path.Append(dyn.Key(pk.MustString()))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if err.HasError() { + if !nv.IsValid() { continue } } - out[k] = v + out.Set(pk, nv) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. 
+ if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindMap, src)) + // Cannot interpret as a map. + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindMap, src, path)) } -func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeSlice(typ reflect.Type, src dyn.Value, seen []reflect.Type, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics switch src.Kind() { @@ -121,11 +226,11 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic out := make([]dyn.Value, 0, len(src.MustSequence())) for _, v := range src.MustSequence() { // Normalize the value according to the slice element type. - v, err := normalizeType(typ.Elem(), v) + v, err := n.normalizeType(typ.Elem(), v, seen, path.Append(dyn.Index(len(out)))) if err != nil { diags = diags.Extend(err) // Skip the element if it cannot be normalized. - if err.HasError() { + if !v.IsValid() { continue } } @@ -133,15 +238,22 @@ func normalizeSlice(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostic out = append(out, v) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags case dyn.KindNil: return src, diags + + case dyn.KindString: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } } - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindSequence, src)) + // Cannot interpret as a slice. + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindSequence, src, path)) } -func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeString(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out string @@ -154,14 +266,17 @@ func normalizeString(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnosti out = strconv.FormatInt(src.MustInt(), 10) case dyn.KindFloat: out = strconv.FormatFloat(src.MustFloat(), 'f', -1, 64) + case dyn.KindNil: + // Return a warning if the field is present but has a null value. + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindString, src, path)) default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindString, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindString, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } -func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeBool(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out bool @@ -176,60 +291,110 @@ func normalizeBool(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics case "false", "n", "N", "no", "No", "NO", "off", "Off", "OFF": out = false default: + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + // Cannot interpret as a boolean. - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src, path)) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. 
+ return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindBool, src, path)) default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindBool, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindBool, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } -func normalizeInt(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeInt(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out int64 switch src.Kind() { case dyn.KindInt: out = src.MustInt() + case dyn.KindFloat: + out = int64(src.MustFloat()) + if src.MustFloat() != float64(out) { + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf(`cannot accurately represent "%g" as integer due to precision loss`, src.MustFloat()), + Location: src.Location(), + Path: path, + }) + } case dyn.KindString: var err error out, err = strconv.ParseInt(src.MustString(), 10, 64) if err != nil { - return dyn.NilValue, diags.Append(diag.Diagnostic{ - Severity: diag.Error, + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, Summary: fmt.Sprintf("cannot parse %q as an integer", src.MustString()), Location: src.Location(), + Path: path, }) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. + return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindInt, src, path)) default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindInt, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindInt, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags } -func normalizeFloat(typ reflect.Type, src dyn.Value) (dyn.Value, diag.Diagnostics) { +func (n normalizeOptions) normalizeFloat(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { var diags diag.Diagnostics var out float64 switch src.Kind() { case dyn.KindFloat: out = src.MustFloat() + case dyn.KindInt: + out = float64(src.MustInt()) + if src.MustInt() != int64(out) { + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, + Summary: fmt.Sprintf(`cannot accurately represent "%d" as floating point number due to precision loss`, src.MustInt()), + Location: src.Location(), + Path: path, + }) + } case dyn.KindString: var err error out, err = strconv.ParseFloat(src.MustString(), 64) if err != nil { - return dyn.NilValue, diags.Append(diag.Diagnostic{ - Severity: diag.Error, + // Return verbatim if it's a pure variable reference. + if dynvar.IsPureVariableReference(src.MustString()) { + return src, nil + } + + return dyn.InvalidValue, diags.Append(diag.Diagnostic{ + Severity: diag.Warning, Summary: fmt.Sprintf("cannot parse %q as a floating point number", src.MustString()), Location: src.Location(), + Path: path, }) } + case dyn.KindNil: + // Return a warning if the field is present but has a null value. 
+ return dyn.InvalidValue, diags.Append(nullWarning(dyn.KindFloat, src, path)) default: - return dyn.NilValue, diags.Append(typeMismatch(dyn.KindFloat, src)) + return dyn.InvalidValue, diags.Append(typeMismatch(dyn.KindFloat, src, path)) } - return dyn.NewValue(out, src.Location()), diags + return dyn.NewValue(out, src.Locations()), diags +} + +func (n normalizeOptions) normalizeInterface(typ reflect.Type, src dyn.Value, path dyn.Path) (dyn.Value, diag.Diagnostics) { + return src, nil } diff --git a/libs/dyn/convert/normalize_test.go b/libs/dyn/convert/normalize_test.go index 13b1ed52f..452ed4eb1 100644 --- a/libs/dyn/convert/normalize_test.go +++ b/libs/dyn/convert/normalize_test.go @@ -5,7 +5,7 @@ import ( "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestNormalizeStruct(t *testing.T) { @@ -40,9 +40,10 @@ func TestNormalizeStructElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Key("bar")), }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -68,6 +69,7 @@ func TestNormalizeStructUnknownField(t *testing.T) { Severity: diag.Warning, Summary: `unknown field: bar`, Location: vin.Get("foo").Location(), + Path: dyn.EmptyPath, }, err[0]) // The field that can be mapped to the struct field is retained. @@ -98,9 +100,172 @@ func TestNormalizeStructError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected map, found string`, Location: vin.Get("foo").Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeStructNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } + type Tmp struct { + Foo Nested `json:"foo"` + Bar Nested `json:"bar"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. + assert.Equal(t, + dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + +func TestNormalizeStructIncludeMissingFields(t *testing.T) { + type Nested struct { + String string `json:"string"` + } + + type Tmp struct { + // Verify that fields that are already set in the dynamic value are not overridden. + Existing string `json:"existing"` + + // Verify that structs are recursively normalized if not set. + Nested Nested `json:"nested"` + Ptr *Nested `json:"ptr"` + + // Verify that containers are also zero-initialized if not set. + Map map[string]string `json:"map"` + Slice []string `json:"slice"` + + // Verify that primitive types are zero-initialized if not set. 
+ String string `json:"string"` + Bool bool `json:"bool"` + Int int `json:"int"` + Float float64 `json:"float"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "existing": dyn.V("already set"), + }) + vout, err := Normalize(typ, vin, IncludeMissingFields) + assert.Empty(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "existing": dyn.V("already set"), + "nested": dyn.V(map[string]dyn.Value{ + "string": dyn.V(""), + }), + "ptr": dyn.V(map[string]dyn.Value{ + "string": dyn.V(""), + }), + "map": dyn.V(map[string]dyn.Value{}), + "slice": dyn.V([]dyn.Value{}), + "string": dyn.V(""), + "bool": dyn.V(false), + "int": dyn.V(int64(0)), + "float": dyn.V(float64(0)), + }), vout) +} + +func TestNormalizeStructIncludeMissingFieldsOnRecursiveType(t *testing.T) { + type Tmp struct { + // Verify that structs are recursively normalized if not set. + Ptr *Tmp `json:"ptr"` + + // Verify that primitive types are zero-initialized if not set. + String string `json:"string"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "string": dyn.V("already set"), + }), + }), + }) + vout, err := Normalize(typ, vin, IncludeMissingFields) + assert.Empty(t, err) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + "ptr": dyn.V(map[string]dyn.Value{ + // Note: the ptr field is not zero-initialized because that would recurse. + "string": dyn.V("already set"), + }), + "string": dyn.V(""), + }), + "string": dyn.V(""), + }), vout) +} + +func TestNormalizeStructVariableReference(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeStructRandomStringError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeStructIntError(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -126,9 +291,10 @@ func TestNormalizeMapElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Key("bar")), }, err[0]) // Elements that encounter an error during normalization are dropped. 
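The hunks above change normalization diagnostics from errors to path-carrying warnings and add the IncludeMissingFields option to Normalize. Below is a minimal usage sketch, not part of the patch: the config struct and input values are hypothetical, while Normalize, IncludeMissingFields, and the diagnostic fields are taken from the diff.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
	"github.com/databricks/cli/libs/dyn/convert"
)

// Hypothetical target type for normalization.
type config struct {
	Name  string `json:"name"`
	Count int    `json:"count"`
}

func main() {
	// "count" holds a string; normalization coerces it to an int64.
	in := dyn.V(map[string]dyn.Value{
		"count": dyn.V("42"),
	})

	out, diags := convert.Normalize(config{}, in, convert.IncludeMissingFields)

	// Problems surface as warnings with a path into the value rather than
	// as hard errors; valid parts of the input are retained.
	for _, d := range diags {
		fmt.Println(d.Severity, d.Summary, d.Path)
	}

	// With IncludeMissingFields, the absent "name" field is populated with
	// its zero value, so this prints: map[count:42 name:]
	fmt.Println(out.AsAny())
}
```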
@@ -151,9 +317,78 @@ func TestNormalizeMapError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected map, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeMapNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } + + var typ map[string]Nested + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. + assert.Equal(t, + dyn.V(map[string]dyn.Value{ + "foo": dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + "bar": dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + +func TestNormalizeMapVariableReference(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeMapRandomStringError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeMapIntError(t *testing.T) { + var typ map[string]string + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected map, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -180,9 +415,10 @@ func TestNormalizeSliceElementDiagnostic(t *testing.T) { vout, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.NewPath(dyn.Index(2)), }, err[0]) // Elements that encounter an error during normalization are dropped. @@ -203,9 +439,78 @@ func TestNormalizeSliceError(t *testing.T) { _, err := Normalize(typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected sequence, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeSliceNestedError(t *testing.T) { + type Nested struct { + F1 int `json:"f1"` + F2 int `json:"f2"` + } + + var typ []Nested + vin := dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "f1": dyn.V("error"), + "f2": dyn.V(1), + }), + dyn.V(map[string]dyn.Value{ + "f1": dyn.V(1), + "f2": dyn.V("error"), + }), + }) + vout, err := Normalize(typ, vin) + assert.Len(t, err, 2) + + // Verify that valid fields are retained. 
+ assert.Equal(t, + dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "f2": dyn.V(int64(1)), + }), + dyn.V(map[string]dyn.Value{ + "f1": dyn.V(int64(1)), + }), + }), + vout, + ) +} + +func TestNormalizeSliceVariableReference(t *testing.T) { + var typ []string + vin := dyn.NewValue("${var.foo}", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + +func TestNormalizeSliceRandomStringError(t *testing.T) { + var typ []string + vin := dyn.NewValue("var foo", []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found string`, + Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeSliceIntError(t *testing.T) { + var typ []string + vin := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + _, err := Normalize(typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `expected sequence, found int`, + Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -219,38 +524,39 @@ func TestNormalizeString(t *testing.T) { func TestNormalizeStringNil(t *testing.T) { var typ string - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected string, found nil`, + Severity: diag.Warning, + Summary: `expected a string value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } func TestNormalizeStringFromBool(t *testing.T) { var typ string - vin := dyn.NewValue(true, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(true, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("true", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("true", vin.Locations()), vout) } func TestNormalizeStringFromInt(t *testing.T) { var typ string - vin := dyn.NewValue(123, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(123, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("123", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("123", vin.Locations()), vout) } func TestNormalizeStringFromFloat(t *testing.T) { var typ string - vin := dyn.NewValue(1.20, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(1.20, []dyn.Location{{File: "file", Line: 1, Column: 1}}) vout, err := Normalize(&typ, vin) assert.Empty(t, err) - assert.Equal(t, dyn.NewValue("1.2", vin.Location()), vout) + assert.Equal(t, dyn.NewValue("1.2", vin.Locations()), vout) } func TestNormalizeStringError(t *testing.T) { @@ -259,9 +565,10 @@ func TestNormalizeStringError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected string, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -275,13 +582,14 @@ func TestNormalizeBool(t *testing.T) { func TestNormalizeBoolNil(t *testing.T) { var typ bool - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, 
[]dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected bool, found nil`, + Severity: diag.Warning, + Summary: `expected a bool value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -306,15 +614,24 @@ func TestNormalizeBoolFromString(t *testing.T) { } } +func TestNormalizeBoolFromStringVariableReference(t *testing.T) { + var typ bool + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeBoolFromStringError(t *testing.T) { var typ bool vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected bool, found string`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -324,9 +641,10 @@ func TestNormalizeBoolError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected bool, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -340,13 +658,35 @@ func TestNormalizeInt(t *testing.T) { func TestNormalizeIntNil(t *testing.T) { var typ int - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected int, found nil`, + Severity: diag.Warning, + Summary: `expected a int value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeIntFromFloat(t *testing.T) { + var typ int + vin := dyn.V(float64(1.0)) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, dyn.V(int64(1)), vout) +} + +func TestNormalizeIntFromFloatError(t *testing.T) { + var typ int + vin := dyn.V(1.5) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `cannot accurately represent "1.5" as integer due to precision loss`, + Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -358,15 +698,24 @@ func TestNormalizeIntFromString(t *testing.T) { assert.Equal(t, dyn.V(int64(123)), vout) } +func TestNormalizeIntFromStringVariableReference(t *testing.T) { + var typ int + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeIntFromStringError(t *testing.T) { var typ int vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `cannot parse "abc" as an integer`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -376,9 +725,10 @@ func TestNormalizeIntError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected int, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } @@ -392,13 +742,39 @@ func TestNormalizeFloat(t *testing.T) { func TestNormalizeFloatNil(t *testing.T) { var typ float64 - vin := dyn.NewValue(nil, dyn.Location{File: "file", Line: 1, Column: 1}) + vin := dyn.NewValue(nil, []dyn.Location{{File: "file", Line: 1, Column: 
1}}) _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, - Summary: `expected float, found nil`, + Severity: diag.Warning, + Summary: `expected a float value, found null`, Location: vin.Location(), + Path: dyn.EmptyPath, + }, err[0]) +} + +func TestNormalizeFloatFromInt(t *testing.T) { + var typ float64 + + // Maximum safe integer that can be accurately represented as a float. + vin := dyn.V(int64(9007199254740992)) + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, dyn.V(float64(9007199254740992)), vout) +} + +func TestNormalizeFloatFromIntError(t *testing.T) { + var typ float64 + + // Minimum integer that cannot be accurately represented as a float. + vin := dyn.V(9007199254740992 + 1) + _, err := Normalize(&typ, vin) + assert.Len(t, err, 1) + assert.Equal(t, diag.Diagnostic{ + Severity: diag.Warning, + Summary: `cannot accurately represent "9007199254740993" as floating point number due to precision loss`, + Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -410,15 +786,24 @@ func TestNormalizeFloatFromString(t *testing.T) { assert.Equal(t, dyn.V(1.2), vout) } +func TestNormalizeFloatFromStringVariableReference(t *testing.T) { + var typ float64 + vin := dyn.V("${var.foo}") + vout, err := Normalize(&typ, vin) + assert.Empty(t, err) + assert.Equal(t, vin, vout) +} + func TestNormalizeFloatFromStringError(t *testing.T) { var typ float64 vin := dyn.V("abc") _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `cannot parse "abc" as a floating point number`, Location: vin.Location(), + Path: dyn.EmptyPath, }, err[0]) } @@ -428,8 +813,55 @@ func TestNormalizeFloatError(t *testing.T) { _, err := Normalize(&typ, vin) assert.Len(t, err, 1) assert.Equal(t, diag.Diagnostic{ - Severity: diag.Error, + Severity: diag.Warning, Summary: `expected float, found map`, Location: dyn.Location{}, + Path: dyn.EmptyPath, }, err[0]) } + +func TestNormalizeAnchors(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + } + + var typ Tmp + vin := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "anchor": dyn.V("anchor").MarkAnchor(), + }) + + vout, err := Normalize(typ, vin) + assert.Len(t, err, 0) + + // The field that can be mapped to the struct field is retained. 
+ assert.Equal(t, map[string]any{ + "foo": "bar", + }, vout.AsAny()) +} + +func TestNormalizeBoolToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(false, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} + +func TestNormalizeIntToAny(t *testing.T) { + var typ any + vin := dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue(10, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} + +func TestNormalizeSliceToAny(t *testing.T) { + var typ any + v1 := dyn.NewValue(1, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + v2 := dyn.NewValue(2, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vin := dyn.NewValue([]dyn.Value{v1, v2}, []dyn.Location{{File: "file", Line: 1, Column: 1}}) + vout, err := Normalize(&typ, vin) + assert.Len(t, err, 0) + assert.Equal(t, dyn.NewValue([]dyn.Value{v1, v2}, []dyn.Location{{File: "file", Line: 1, Column: 1}}), vout) +} diff --git a/libs/dyn/convert/struct_info.go b/libs/dyn/convert/struct_info.go index dc3ed4da4..595e52edd 100644 --- a/libs/dyn/convert/struct_info.go +++ b/libs/dyn/convert/struct_info.go @@ -6,6 +6,7 @@ import ( "sync" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/textutil" ) // structInfo holds the type information we need to efficiently @@ -84,6 +85,14 @@ func buildStructInfo(typ reflect.Type) structInfo { } name, _, _ := strings.Cut(sf.Tag.Get("json"), ",") + if typ.Name() == "QualityMonitor" && name == "-" { + urlName, _, _ := strings.Cut(sf.Tag.Get("url"), ",") + if urlName == "" || urlName == "-" { + name = textutil.CamelToSnakeCase(sf.Name) + } else { + name = urlName + } + } if name == "" || name == "-" { continue } diff --git a/libs/dyn/convert/struct_info_test.go b/libs/dyn/convert/struct_info_test.go index 08be3c47e..20348ff60 100644 --- a/libs/dyn/convert/struct_info_test.go +++ b/libs/dyn/convert/struct_info_test.go @@ -5,7 +5,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestStructInfoPlain(t *testing.T) { diff --git a/libs/dyn/convert/to_typed.go b/libs/dyn/convert/to_typed.go index 209de12cb..181c88cc9 100644 --- a/libs/dyn/convert/to_typed.go +++ b/libs/dyn/convert/to_typed.go @@ -6,6 +6,7 @@ import ( "strconv" "github.com/databricks/cli/libs/dyn" + "github.com/databricks/cli/libs/dyn/dynvar" ) func ToTyped(dst any, src dyn.Value) error { @@ -15,7 +16,7 @@ func ToTyped(dst any, src dyn.Value) error { for dstv.Kind() == reflect.Pointer { // If the source value is nil and the destination is a settable pointer, // set the destination to nil. Also see `end_to_end_test.go`. 
- if dstv.CanSet() && src == dyn.NilValue { + if dstv.CanSet() && src.Kind() == dyn.KindNil { dstv.SetZero() return nil } @@ -45,6 +46,8 @@ func ToTyped(dst any, src dyn.Value) error { return toTypedInt(dstv, src) case reflect.Float32, reflect.Float64: return toTypedFloat(dstv, src) + case reflect.Interface: + return toTypedInterface(dstv, src) } return fmt.Errorf("unsupported type: %s", dstv.Kind()) @@ -53,9 +56,16 @@ func ToTyped(dst any, src dyn.Value) error { func toTypedStruct(dst reflect.Value, src dyn.Value) error { switch src.Kind() { case dyn.KindMap: + // Zero the destination struct such that fields + // that aren't present in [src] are cleared. + dst.SetZero() + info := getStructInfo(dst.Type()) - for k, v := range src.MustMap() { - index, ok := info.Fields[k] + for _, pair := range src.MustMap().Pairs() { + pk := pair.Key + pv := pair.Value + + index, ok := info.Fields[pk.MustString()] if !ok { // Ignore unknown fields. // A warning will be printed later. See PR #904. @@ -77,7 +87,7 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { f = f.Field(x) } - err := ToTyped(f.Addr().Interface(), v) + err := ToTyped(f.Addr().Interface(), pv) if err != nil { return err } @@ -93,6 +103,12 @@ func toTypedStruct(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -107,20 +123,29 @@ func toTypedMap(dst reflect.Value, src dyn.Value) error { m := src.MustMap() // Always overwrite. - dst.Set(reflect.MakeMapWithSize(dst.Type(), len(m))) - for k, v := range m { - kv := reflect.ValueOf(k) + dst.Set(reflect.MakeMapWithSize(dst.Type(), m.Len())) + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + kv := reflect.ValueOf(pk.MustString()) + kt := dst.Type().Key() vv := reflect.New(dst.Type().Elem()) - err := ToTyped(vv.Interface(), v) + err := ToTyped(vv.Interface(), pv) if err != nil { return err } - dst.SetMapIndex(kv, vv.Elem()) + dst.SetMapIndex(kv.Convert(kt), vv.Elem()) } return nil case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -146,6 +171,12 @@ func toTypedSlice(dst reflect.Value, src dyn.Value) error { case dyn.KindNil: dst.SetZero() return nil + case dyn.KindString: + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -191,6 +222,11 @@ func toTypedBool(dst reflect.Value, src dyn.Value) error { dst.SetBool(false) return nil } + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -209,6 +245,11 @@ func toTypedInt(dst reflect.Value, src dyn.Value) error { dst.SetInt(i64) return nil } + // Ignore pure variable references (e.g. ${var.foo}). + if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -227,6 +268,11 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { dst.SetFloat(f64) return nil } + // Ignore pure variable references (e.g. ${var.foo}). 
+ if dynvar.IsPureVariableReference(src.MustString()) { + dst.SetZero() + return nil + } } return TypeError{ @@ -234,3 +280,13 @@ func toTypedFloat(dst reflect.Value, src dyn.Value) error { msg: fmt.Sprintf("expected a float, found a %s", src.Kind()), } } + +func toTypedInterface(dst reflect.Value, src dyn.Value) error { + if src.Kind() == dyn.KindNil { + dst.Set(reflect.Zero(dst.Type())) + return nil + } + + dst.Set(reflect.ValueOf(src.AsAny())) + return nil +} diff --git a/libs/dyn/convert/to_typed_test.go b/libs/dyn/convert/to_typed_test.go index 3adc94c79..37d85539c 100644 --- a/libs/dyn/convert/to_typed_test.go +++ b/libs/dyn/convert/to_typed_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) @@ -59,6 +59,27 @@ func TestToTypedStructOverwrite(t *testing.T) { assert.Equal(t, "baz", out.Bar) } +func TestToTypedStructClearFields(t *testing.T) { + type Tmp struct { + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` + } + + // Struct value with non-empty fields. + var out = Tmp{ + Foo: "baz", + Bar: "qux", + } + + // Value is an empty map. + v := dyn.V(map[string]dyn.Value{}) + + // The previously set fields should be cleared. + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, Tmp{}, out) +} + func TestToTypedStructAnonymousByValue(t *testing.T) { type Bar struct { Bar string `json:"bar"` @@ -334,10 +355,17 @@ func TestToTypedBoolFromString(t *testing.T) { } // Other - err := ToTyped(&out, dyn.V("${var.foo}")) + err := ToTyped(&out, dyn.V("some other string")) require.Error(t, err) } +func TestToTypedBoolFromStringVariableReference(t *testing.T) { + var out bool = true + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, false, out) +} + func TestToTypedInt(t *testing.T) { var out int err := ToTyped(&out, dyn.V(1234)) @@ -393,6 +421,13 @@ func TestToTypedIntFromStringInt(t *testing.T) { assert.Equal(t, int(123), out) } +func TestToTypedIntFromStringVariableReference(t *testing.T) { + var out int = 123 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, int(0), out) +} + func TestToTypedFloat32(t *testing.T) { var out float32 err := ToTyped(&out, dyn.V(float32(1.0))) @@ -446,3 +481,62 @@ func TestToTypedFloat64FromString(t *testing.T) { require.NoError(t, err) assert.Equal(t, float64(1.2), out) } + +func TestToTypedFloat32FromStringVariableReference(t *testing.T) { + var out float32 = 1.0 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, float32(0.0), out) +} + +func TestToTypedFloat64FromStringVariableReference(t *testing.T) { + var out float64 = 1.0 + err := ToTyped(&out, dyn.V("${var.foo}")) + require.NoError(t, err) + assert.Equal(t, float64(0.0), out) +} + +func TestToTypedWithAliasKeyType(t *testing.T) { + type custom string + + var out map[custom]string + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), + }) + + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Len(t, out, 2) + assert.Equal(t, "bar", out["foo"]) + assert.Equal(t, "baz", out["bar"]) +} + +func TestToTypedAnyWithBool(t *testing.T) { + var out any + err := ToTyped(&out, dyn.V(false)) + require.NoError(t, err) + assert.Equal(t, false, out) + + err = ToTyped(&out, dyn.V(true)) + require.NoError(t, err) + assert.Equal(t, true, out) +} + +func TestToTypedAnyWithMap(t *testing.T) { + var 
out any + v := dyn.V(map[string]dyn.Value{ + "foo": dyn.V("bar"), + "bar": dyn.V("baz"), + }) + err := ToTyped(&out, v) + require.NoError(t, err) + assert.Equal(t, map[string]any{"foo": "bar", "bar": "baz"}, out) +} + +func TestToTypedAnyWithNil(t *testing.T) { + var out any + err := ToTyped(&out, dyn.NilValue) + require.NoError(t, err) + assert.Equal(t, nil, out) +} diff --git a/libs/dyn/dynassert/assert.go b/libs/dyn/dynassert/assert.go new file mode 100644 index 000000000..dc6676ca2 --- /dev/null +++ b/libs/dyn/dynassert/assert.go @@ -0,0 +1,113 @@ +package dynassert + +import ( + "github.com/databricks/cli/libs/dyn" + "github.com/stretchr/testify/assert" +) + +func Equal(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + ev, eok := expected.(dyn.Value) + av, aok := actual.(dyn.Value) + if eok && aok && ev.IsValid() && av.IsValid() { + if !assert.Equal(t, ev.AsAny(), av.AsAny(), msgAndArgs...) { + return false + } + + // The values are equal on contents. Now compare the locations. + if !assert.Equal(t, ev.Location(), av.Location(), msgAndArgs...) { + return false + } + + // Walk ev and av and compare the locations of each element. + _, err := dyn.Walk(ev, func(p dyn.Path, evv dyn.Value) (dyn.Value, error) { + avv, err := dyn.GetByPath(av, p) + if assert.NoError(t, err, "unable to get value from actual value at path %v", p.String()) { + assert.Equal(t, evv.Location(), avv.Location()) + } + return evv, nil + }) + return assert.NoError(t, err) + } + + return assert.Equal(t, expected, actual, msgAndArgs...) +} + +func EqualValues(t assert.TestingT, expected, actual interface{}, msgAndArgs ...interface{}) bool { + return assert.EqualValues(t, expected, actual, msgAndArgs...) +} + +func NotEqual(t assert.TestingT, expected interface{}, actual interface{}, msgAndArgs ...interface{}) bool { + return assert.NotEqual(t, expected, actual, msgAndArgs...) +} + +func Len(t assert.TestingT, object interface{}, length int, msgAndArgs ...interface{}) bool { + return assert.Len(t, object, length, msgAndArgs...) +} + +func Empty(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.Empty(t, object, msgAndArgs...) +} + +func Nil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.Nil(t, object, msgAndArgs...) +} + +func NotNil(t assert.TestingT, object interface{}, msgAndArgs ...interface{}) bool { + return assert.NotNil(t, object, msgAndArgs...) +} + +func NoError(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { + return assert.NoError(t, err, msgAndArgs...) +} + +func Error(t assert.TestingT, err error, msgAndArgs ...interface{}) bool { + return assert.Error(t, err, msgAndArgs...) +} + +func EqualError(t assert.TestingT, theError error, errString string, msgAndArgs ...interface{}) bool { + return assert.EqualError(t, theError, errString, msgAndArgs...) +} + +func ErrorContains(t assert.TestingT, theError error, contains string, msgAndArgs ...interface{}) bool { + return assert.ErrorContains(t, theError, contains, msgAndArgs...) +} + +func ErrorIs(t assert.TestingT, theError, target error, msgAndArgs ...interface{}) bool { + return assert.ErrorIs(t, theError, target, msgAndArgs...) +} + +func True(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { + return assert.True(t, value, msgAndArgs...) +} + +func False(t assert.TestingT, value bool, msgAndArgs ...interface{}) bool { + return assert.False(t, value, msgAndArgs...) 
+} + +func Contains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { + return assert.Contains(t, list, element, msgAndArgs...) +} + +func NotContains(t assert.TestingT, list interface{}, element interface{}, msgAndArgs ...interface{}) bool { + return assert.NotContains(t, list, element, msgAndArgs...) +} + +func ElementsMatch(t assert.TestingT, listA, listB interface{}, msgAndArgs ...interface{}) bool { + return assert.ElementsMatch(t, listA, listB, msgAndArgs...) +} + +func Panics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { + return assert.Panics(t, f, msgAndArgs...) +} + +func PanicsWithValue(t assert.TestingT, expected interface{}, f func(), msgAndArgs ...interface{}) bool { + return assert.PanicsWithValue(t, expected, f, msgAndArgs...) +} + +func PanicsWithError(t assert.TestingT, errString string, f func(), msgAndArgs ...interface{}) bool { + return assert.PanicsWithError(t, errString, f, msgAndArgs...) +} + +func NotPanics(t assert.TestingT, f func(), msgAndArgs ...interface{}) bool { + return assert.NotPanics(t, f, msgAndArgs...) +} diff --git a/libs/dyn/dynassert/assert_test.go b/libs/dyn/dynassert/assert_test.go new file mode 100644 index 000000000..43258bd20 --- /dev/null +++ b/libs/dyn/dynassert/assert_test.go @@ -0,0 +1,45 @@ +package dynassert + +import ( + "go/parser" + "go/token" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestThatThisTestPackageIsUsed(t *testing.T) { + var base = ".." + var files []string + err := fs.WalkDir(os.DirFS(base), ".", func(path string, d fs.DirEntry, err error) error { + if d.IsDir() { + // Filter this directory. + if filepath.Base(path) == "dynassert" { + return fs.SkipDir + } + } + if ok, _ := filepath.Match("*_test.go", d.Name()); ok { + files = append(files, filepath.Join(base, path)) + } + return nil + }) + require.NoError(t, err) + + // Confirm that none of the test files under `libs/dyn` import the + // `testify/assert` package and instead import this package for asserts. + fset := token.NewFileSet() + for _, file := range files { + f, err := parser.ParseFile(fset, file, nil, parser.ParseComments) + require.NoError(t, err) + + for _, imp := range f.Imports { + if strings.Contains(imp.Path.Value, `github.com/stretchr/testify/assert`) { + t.Errorf("File %s should not import github.com/stretchr/testify/assert", file) + } + } + } +} diff --git a/libs/dyn/dynvar/lookup_test.go b/libs/dyn/dynvar/lookup_test.go index 2341d7208..b78115ee8 100644 --- a/libs/dyn/dynvar/lookup_test.go +++ b/libs/dyn/dynvar/lookup_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/dynvar" - "github.com/stretchr/testify/assert" ) func TestDefaultLookup(t *testing.T) { diff --git a/libs/dyn/dynvar/ref.go b/libs/dyn/dynvar/ref.go index e4616c520..bf160fa85 100644 --- a/libs/dyn/dynvar/ref.go +++ b/libs/dyn/dynvar/ref.go @@ -6,7 +6,9 @@ import ( "github.com/databricks/cli/libs/dyn" ) -var re = regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*)*)\}`) +const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}` + +var re = regexp.MustCompile(VariableRegex) // ref represents a variable reference. // It is a string [dyn.Value] contained in a larger [dyn.Value]. 
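The ref.go hunk above widens the variable reference regex to accept indexed references such as `${a.b[0]}`, and the following hunk builds IsPureVariableReference on top of it. A small sketch of the intended behaviour, not part of the patch; the sample strings are hypothetical.

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn/dynvar"
)

func main() {
	// IsPureVariableReference is true only when the entire string is a
	// single ${...} reference; indexed forms are now accepted as well.
	for _, s := range []string{
		"${var.foo}",        // pure reference: true
		"${resources.jobs.my_job.tasks[0].task_key}", // indexed reference: true
		"prefix ${var.foo}", // interpolation, not a pure reference: false
		"",                  // empty string: false
	} {
		fmt.Printf("%q -> %v\n", s, dynvar.IsPureVariableReference(s))
	}
}
```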
@@ -67,3 +69,7 @@ func (v ref) references() []string { } return out } + +func IsPureVariableReference(s string) bool { + return len(s) > 0 && re.FindString(s) == s +} diff --git a/libs/dyn/dynvar/ref_test.go b/libs/dyn/dynvar/ref_test.go index b3066276c..aff3643e0 100644 --- a/libs/dyn/dynvar/ref_test.go +++ b/libs/dyn/dynvar/ref_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) @@ -44,3 +44,10 @@ func TestNewRefInvalidPattern(t *testing.T) { require.False(t, ok, "should not match invalid pattern: %s", v) } } + +func TestIsPureVariableReference(t *testing.T) { + assert.False(t, IsPureVariableReference("")) + assert.False(t, IsPureVariableReference("${foo.bar} suffix")) + assert.False(t, IsPureVariableReference("prefix ${foo.bar}")) + assert.True(t, IsPureVariableReference("${foo.bar}")) +} diff --git a/libs/dyn/dynvar/resolve.go b/libs/dyn/dynvar/resolve.go index b4e119b6d..111da25c8 100644 --- a/libs/dyn/dynvar/resolve.go +++ b/libs/dyn/dynvar/resolve.go @@ -38,12 +38,20 @@ func Resolve(in dyn.Value, fn Lookup) (out dyn.Value, err error) { return resolver{in: in, fn: fn}.run() } +type lookupResult struct { + v dyn.Value + err error +} + type resolver struct { in dyn.Value fn Lookup refs map[string]ref resolved map[string]dyn.Value + + // Memoization for lookups. + lookups map[string]lookupResult } func (r resolver) run() (out dyn.Value, err error) { @@ -84,8 +92,10 @@ func (r *resolver) collectVariableReferences() (err error) { } func (r *resolver) resolveVariableReferences() (err error) { - // Initialize map for resolved variables. - // We use this for memoization. + // Initialize cache for lookups. + r.lookups = make(map[string]lookupResult) + + // Initialize cache for resolved variable references. r.resolved = make(map[string]dyn.Value) // Resolve each variable reference (in order). @@ -95,38 +105,17 @@ func (r *resolver) resolveVariableReferences() (err error) { keys := maps.Keys(r.refs) sort.Strings(keys) for _, key := range keys { - _, err := r.resolve(key, []string{key}) + v, err := r.resolveRef(r.refs[key], []string{key}) if err != nil { return err } + r.resolved[key] = v } return nil } -func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { - // Check if we have already resolved this variable reference. - if v, ok := r.resolved[key]; ok { - return v, nil - } - - ref, ok := r.refs[key] - if !ok { - // Perform lookup in the input. - p, err := dyn.NewPathFromString(key) - if err != nil { - return dyn.InvalidValue, err - } - v, err := r.fn(p) - if err != nil && dyn.IsNoSuchKeyError(err) { - return dyn.InvalidValue, fmt.Errorf( - "reference does not exist: ${%s}", - key, - ) - } - return v, err - } - +func (r *resolver) resolveRef(ref ref, seen []string) (dyn.Value, error) { // This is an unresolved variable reference. deps := ref.references() @@ -143,7 +132,7 @@ func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { ) } - v, err := r.resolve(dep, append(seen, dep)) + v, err := r.resolveKey(dep, append(seen, dep)) // If we should skip resolution of this key, index j will hold an invalid [dyn.Value]. if errors.Is(err, ErrSkipResolution) { @@ -161,8 +150,12 @@ func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { if ref.isPure() && complete { // If the variable reference is pure, we can substitute it. // This is useful for interpolating values of non-string types. 
- r.resolved[key] = resolved[0] - return resolved[0], nil + // + // Note: we use the location of the variable reference to preserve the information + // of where it is used. This also means that relative path resolution is done + // relative to where a variable is used, not where it is defined. + // + return dyn.NewValue(resolved[0].Value(), ref.value.Locations()), nil } // Not pure; perform string interpolation. @@ -185,10 +178,42 @@ func (r *resolver) resolve(key string, seen []string) (dyn.Value, error) { ref.str = strings.Replace(ref.str, ref.matches[j][0], s, 1) } - // Store the interpolated value. - v := dyn.NewValue(ref.str, ref.value.Location()) - r.resolved[key] = v - return v, nil + return dyn.NewValue(ref.str, ref.value.Locations()), nil +} + +func (r *resolver) resolveKey(key string, seen []string) (dyn.Value, error) { + // Check if we have already looked up this key. + if v, ok := r.lookups[key]; ok { + return v.v, v.err + } + + // Parse the key into a path. + p, err := dyn.NewPathFromString(key) + if err != nil { + return dyn.InvalidValue, err + } + + // Look up the value for the given key. + v, err := r.fn(p) + if err != nil { + if dyn.IsNoSuchKeyError(err) { + err = fmt.Errorf("reference does not exist: ${%s}", key) + } + + // Cache the return value and return to the caller. + r.lookups[key] = lookupResult{v: dyn.InvalidValue, err: err} + return dyn.InvalidValue, err + } + + // If the returned value is a valid variable reference, resolve it. + ref, ok := newRef(v) + if ok { + v, err = r.resolveRef(ref, seen) + } + + // Cache the return value and return to the caller. + r.lookups[key] = lookupResult{v: v, err: err} + return v, err } func (r *resolver) replaceVariableReferences() (dyn.Value, error) { diff --git a/libs/dyn/dynvar/resolve_test.go b/libs/dyn/dynvar/resolve_test.go index ba700503e..498322a42 100644 --- a/libs/dyn/dynvar/resolve_test.go +++ b/libs/dyn/dynvar/resolve_test.go @@ -4,8 +4,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/dynvar" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -182,3 +182,128 @@ func TestResolveWithSkip(t *testing.T) { assert.Equal(t, "a ${b}", getByPath(t, out, "e").MustString()) assert.Equal(t, "${b} a a ${b}", getByPath(t, out, "f").MustString()) } + +func TestResolveWithSkipEverything(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("b"), + "c": dyn.V("${a}"), + "d": dyn.V("${b}"), + "e": dyn.V("${a} ${b}"), + "f": dyn.V("${b} ${a} ${a} ${b}"), + "g": dyn.V("${d} ${c} ${c} ${d}"), + }) + + // The call must not replace anything if the lookup function returns ErrSkipResolution. 
+ out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "b", getByPath(t, out, "b").MustString()) + assert.Equal(t, "${a}", getByPath(t, out, "c").MustString()) + assert.Equal(t, "${b}", getByPath(t, out, "d").MustString()) + assert.Equal(t, "${a} ${b}", getByPath(t, out, "e").MustString()) + assert.Equal(t, "${b} ${a} ${a} ${b}", getByPath(t, out, "f").MustString()) + assert.Equal(t, "${d} ${c} ${c} ${d}", getByPath(t, out, "g").MustString()) +} + +func TestResolveWithInterpolateNewRef(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("${a}"), + }) + + // The call replaces ${a} with ${foobar} and skips everything else. + out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + if path.String() == "a" { + return dyn.V("${foobar}"), nil + } + return dyn.InvalidValue, dynvar.ErrSkipResolution + }) + + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "${foobar}", getByPath(t, out, "b").MustString()) +} + +func TestResolveWithInterpolateAliasedRef(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "a": dyn.V("a"), + "b": dyn.V("${a}"), + "c": dyn.V("${x}"), + }) + + // The call replaces ${x} with ${b} and skips everything else. + out, err := dynvar.Resolve(in, func(path dyn.Path) (dyn.Value, error) { + if path.String() == "x" { + return dyn.V("${b}"), nil + } + return dyn.GetByPath(in, path) + }) + + require.NoError(t, err) + assert.Equal(t, "a", getByPath(t, out, "a").MustString()) + assert.Equal(t, "a", getByPath(t, out, "b").MustString()) + assert.Equal(t, "a", getByPath(t, out, "c").MustString()) +} + +func TestResolveIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a"), dyn.V("b")}), + "a": dyn.V("a: ${slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveIndexedRefsFromMap(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${map.slice[0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveMapFieldFromIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "map": dyn.V( + map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "value": dyn.V("a"), + }), + }), + }), + "a": dyn.V("a: ${map.slice[0].value}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} + +func TestResolveNestedIndexedRefs(t *testing.T) { + in := dyn.V(map[string]dyn.Value{ + "slice": dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{dyn.V("a")}), + }), + "a": dyn.V("a: ${slice[0][0]}"), + }) + + out, err := dynvar.Resolve(in, dynvar.DefaultLookup(in)) + require.NoError(t, err) + + assert.Equal(t, "a: a", getByPath(t, out, "a").MustString()) +} diff --git a/libs/dyn/kind.go b/libs/dyn/kind.go index 8f51c25c6..9d507fbc5 100644 --- a/libs/dyn/kind.go +++ b/libs/dyn/kind.go @@ -22,7 +22,7 @@ const ( func kindOf(v any) Kind { switch v.(type) { - case map[string]Value: + case Mapping: return 
KindMap case []Value: return KindSequence diff --git a/libs/dyn/kind_test.go b/libs/dyn/kind_test.go index 84c90713f..9889d31e1 100644 --- a/libs/dyn/kind_test.go +++ b/libs/dyn/kind_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestKindZeroValue(t *testing.T) { diff --git a/libs/dyn/location.go b/libs/dyn/location.go index cd369193e..961d2f121 100644 --- a/libs/dyn/location.go +++ b/libs/dyn/location.go @@ -1,6 +1,9 @@ package dyn -import "fmt" +import ( + "fmt" + "path/filepath" +) type Location struct { File string @@ -11,3 +14,11 @@ type Location struct { func (l Location) String() string { return fmt.Sprintf("%s:%d:%d", l.File, l.Line, l.Column) } + +func (l Location) Directory() (string, error) { + if l.File == "" { + return "", fmt.Errorf("no file in location") + } + + return filepath.Dir(l.File), nil +} diff --git a/libs/dyn/location_test.go b/libs/dyn/location_test.go index 29226d73d..e11f7cb56 100644 --- a/libs/dyn/location_test.go +++ b/libs/dyn/location_test.go @@ -4,10 +4,23 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestLocation(t *testing.T) { loc := dyn.Location{File: "file", Line: 1, Column: 2} assert.Equal(t, "file:1:2", loc.String()) } + +func TestLocationDirectory(t *testing.T) { + loc := dyn.Location{File: "file", Line: 1, Column: 2} + dir, err := loc.Directory() + assert.NoError(t, err) + assert.Equal(t, ".", dir) +} + +func TestLocationDirectoryNoFile(t *testing.T) { + loc := dyn.Location{} + _, err := loc.Directory() + assert.Error(t, err) +} diff --git a/libs/dyn/mapping.go b/libs/dyn/mapping.go new file mode 100644 index 000000000..668f57ecc --- /dev/null +++ b/libs/dyn/mapping.go @@ -0,0 +1,148 @@ +package dyn + +import ( + "fmt" + "maps" + "slices" +) + +// Pair represents a single key-value pair in a Mapping. +type Pair struct { + Key Value + Value Value +} + +// Mapping represents a key-value map of dynamic values. +// It exists because plain Go maps cannot use dynamic values for keys. +// We need to use dynamic values for keys because it lets us associate metadata +// with keys (i.e. their definition location). Keys must be strings. +type Mapping struct { + pairs []Pair + index map[string]int +} + +// NewMapping creates a new empty Mapping. +func NewMapping() Mapping { + return Mapping{ + pairs: make([]Pair, 0), + index: make(map[string]int), + } +} + +// newMappingWithSize creates a new Mapping preallocated to the specified size. +func newMappingWithSize(size int) Mapping { + return Mapping{ + pairs: make([]Pair, 0, size), + index: make(map[string]int, size), + } +} + +// newMappingFromGoMap creates a new Mapping from a Go map of string keys and dynamic values. +func newMappingFromGoMap(vin map[string]Value) Mapping { + m := newMappingWithSize(len(vin)) + for k, v := range vin { + m.Set(V(k), v) + } + return m +} + +// Pairs returns all the key-value pairs in the Mapping. +func (m Mapping) Pairs() []Pair { + return m.pairs +} + +// Len returns the number of key-value pairs in the Mapping. +func (m Mapping) Len() int { + return len(m.pairs) +} + +// GetPair returns the key-value pair with the specified key. +// It also returns a boolean indicating whether the pair was found. 
+func (m Mapping) GetPair(key Value) (Pair, bool) { + skey, ok := key.AsString() + if !ok { + return Pair{}, false + } + return m.GetPairByString(skey) +} + +// GetPairByString returns the key-value pair with the specified string key. +// It also returns a boolean indicating whether the pair was found. +func (m Mapping) GetPairByString(skey string) (Pair, bool) { + if i, ok := m.index[skey]; ok { + return m.pairs[i], true + } + return Pair{}, false +} + +// Get returns the value associated with the specified key. +// It also returns a boolean indicating whether the value was found. +func (m Mapping) Get(key Value) (Value, bool) { + p, ok := m.GetPair(key) + return p.Value, ok +} + +// GetByString returns the value associated with the specified string key. +// It also returns a boolean indicating whether the value was found. +func (m *Mapping) GetByString(skey string) (Value, bool) { + p, ok := m.GetPairByString(skey) + return p.Value, ok +} + +// Set sets the value for the given key in the mapping. +// If the key already exists, the value is updated. +// If the key does not exist, a new key-value pair is added. +// The key must be a string, otherwise an error is returned. +func (m *Mapping) Set(key Value, value Value) error { + skey, ok := key.AsString() + if !ok { + return fmt.Errorf("key must be a string, got %s", key.Kind()) + } + + // If the key already exists, update the value. + if i, ok := m.index[skey]; ok { + m.pairs[i].Value = value + return nil + } + + // Otherwise, add a new pair. + m.pairs = append(m.pairs, Pair{key, value}) + if m.index == nil { + m.index = make(map[string]int) + } + m.index[skey] = len(m.pairs) - 1 + return nil +} + +// Keys returns all the keys in the Mapping. +func (m Mapping) Keys() []Value { + keys := make([]Value, 0, len(m.pairs)) + for _, p := range m.pairs { + keys = append(keys, p.Key) + } + return keys +} + +// Values returns all the values in the Mapping. +func (m Mapping) Values() []Value { + values := make([]Value, 0, len(m.pairs)) + for _, p := range m.pairs { + values = append(values, p.Value) + } + return values +} + +// Clone creates a shallow copy of the Mapping. +func (m Mapping) Clone() Mapping { + return Mapping{ + pairs: slices.Clone(m.pairs), + index: maps.Clone(m.index), + } +} + +// Merge merges the key-value pairs from another Mapping into the current Mapping. 
+func (m *Mapping) Merge(n Mapping) { + for _, p := range n.pairs { + m.Set(p.Key, p.Value) + } +} diff --git a/libs/dyn/mapping_test.go b/libs/dyn/mapping_test.go new file mode 100644 index 000000000..43b24b0c5 --- /dev/null +++ b/libs/dyn/mapping_test.go @@ -0,0 +1,204 @@ +package dyn_test + +import ( + "fmt" + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" +) + +func TestNewMapping(t *testing.T) { + m := dyn.NewMapping() + assert.Equal(t, 0, m.Len()) +} + +func TestMappingZeroValue(t *testing.T) { + var m dyn.Mapping + assert.Equal(t, 0, m.Len()) + + value, ok := m.Get(dyn.V("key")) + assert.Equal(t, dyn.InvalidValue, value) + assert.False(t, ok) + assert.Len(t, m.Keys(), 0) + assert.Len(t, m.Values(), 0) +} + +func TestMappingGet(t *testing.T) { + var m dyn.Mapping + err := m.Set(dyn.V("key"), dyn.V("value")) + assert.NoError(t, err) + assert.Equal(t, 1, m.Len()) + + // Call GetPair + p, ok := m.GetPair(dyn.V("key")) + assert.True(t, ok) + assert.Equal(t, dyn.V("key"), p.Key) + assert.Equal(t, dyn.V("value"), p.Value) + + // Modify the value to make sure we're not getting a reference + p.Value = dyn.V("newvalue") + + // Call GetPair with invalid key + p, ok = m.GetPair(dyn.V(1234)) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call GetPair with non-existent key + p, ok = m.GetPair(dyn.V("enoexist")) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call GetPairByString + p, ok = m.GetPairByString("key") + assert.True(t, ok) + assert.Equal(t, dyn.V("key"), p.Key) + assert.Equal(t, dyn.V("value"), p.Value) + + // Modify the value to make sure we're not getting a reference + p.Value = dyn.V("newvalue") + + // Call GetPairByString with with non-existent key + p, ok = m.GetPairByString("enoexist") + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, p.Key) + assert.Equal(t, dyn.InvalidValue, p.Value) + + // Call Get + value, ok := m.Get(dyn.V("key")) + assert.True(t, ok) + assert.Equal(t, dyn.V("value"), value) + + // Call Get with invalid key + value, ok = m.Get(dyn.V(1234)) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) + + // Call Get with non-existent key + value, ok = m.Get(dyn.V("enoexist")) + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) + + // Call GetByString + value, ok = m.GetByString("key") + assert.True(t, ok) + assert.Equal(t, dyn.V("value"), value) + + // Call GetByString with non-existent key + value, ok = m.GetByString("enoexist") + assert.False(t, ok) + assert.Equal(t, dyn.InvalidValue, value) +} + +func TestMappingSet(t *testing.T) { + var err error + var m dyn.Mapping + + // Set a value + err = m.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + assert.Equal(t, 1, m.Len()) + + // Confirm the value + value, ok := m.GetByString("key1") + assert.True(t, ok) + assert.Equal(t, dyn.V("foo"), value) + + // Set another value + err = m.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + assert.Equal(t, 2, m.Len()) + + // Confirm the value + value, ok = m.Get(dyn.V("key2")) + assert.True(t, ok) + assert.Equal(t, dyn.V("bar"), value) + + // Overwrite first value + err = m.Set(dyn.V("key1"), dyn.V("qux")) + assert.NoError(t, err) + assert.Equal(t, 2, m.Len()) + + // Confirm the value + value, ok = m.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("qux"), value) + + // Try to 
set non-string key + err = m.Set(dyn.V(1), dyn.V("qux")) + assert.Error(t, err) + assert.Equal(t, 2, m.Len()) +} + +func TestMappingKeysValues(t *testing.T) { + var err error + + // Configure mapping + var m dyn.Mapping + err = m.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + err = m.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + + // Confirm keys + keys := m.Keys() + assert.Len(t, keys, 2) + assert.Contains(t, keys, dyn.V("key1")) + assert.Contains(t, keys, dyn.V("key2")) + + // Confirm values + values := m.Values() + assert.Len(t, values, 2) + assert.Contains(t, values, dyn.V("foo")) + assert.Contains(t, values, dyn.V("bar")) +} + +func TestMappingClone(t *testing.T) { + var err error + + // Configure mapping + var m1 dyn.Mapping + err = m1.Set(dyn.V("key1"), dyn.V("foo")) + assert.NoError(t, err) + err = m1.Set(dyn.V("key2"), dyn.V("bar")) + assert.NoError(t, err) + + // Clone mapping + m2 := m1.Clone() + assert.Equal(t, m1.Len(), m2.Len()) + + // Modify original mapping + err = m1.Set(dyn.V("key1"), dyn.V("qux")) + assert.NoError(t, err) + + // Confirm values + value, ok := m1.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("qux"), value) + value, ok = m2.Get(dyn.V("key1")) + assert.True(t, ok) + assert.Equal(t, dyn.V("foo"), value) +} + +func TestMappingMerge(t *testing.T) { + var m1 dyn.Mapping + for i := 0; i < 10; i++ { + err := m1.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + require.NoError(t, err) + } + + var m2 dyn.Mapping + for i := 5; i < 15; i++ { + err := m2.Set(dyn.V(fmt.Sprintf("%d", i)), dyn.V(i)) + require.NoError(t, err) + } + + var out dyn.Mapping + out.Merge(m1) + assert.Equal(t, 10, out.Len()) + out.Merge(m2) + assert.Equal(t, 15, out.Len()) +} diff --git a/libs/dyn/merge/elements_by_key.go b/libs/dyn/merge/elements_by_key.go new file mode 100644 index 000000000..e6e640d14 --- /dev/null +++ b/libs/dyn/merge/elements_by_key.go @@ -0,0 +1,67 @@ +package merge + +import "github.com/databricks/cli/libs/dyn" + +type elementsByKey struct { + key string + keyFunc func(dyn.Value) string +} + +func (e elementsByKey) Map(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + // We know the type of this value is a sequence. + // For additional defence, return self if it is not. + elements, ok := v.AsSequence() + if !ok { + return v, nil + } + + seen := make(map[string]dyn.Value, len(elements)) + keys := make([]string, 0, len(elements)) + + // Iterate in natural order. For a given key, we first see the + // base definition and merge instances that come after it. + for i := range elements { + kv := elements[i].Get(e.key) + key := e.keyFunc(kv) + + // Register element with key if not yet seen before. + ref, ok := seen[key] + if !ok { + keys = append(keys, key) + seen[key] = elements[i] + continue + } + + // Merge this instance into the reference. + nv, err := Merge(ref, elements[i]) + if err != nil { + return v, err + } + + // Overwrite reference. + seen[key] = nv + } + + // Gather resulting elements in natural order. + out := make([]dyn.Value, 0, len(keys)) + for _, key := range keys { + nv, err := dyn.Set(seen[key], e.key, dyn.V(key)) + if err != nil { + return dyn.InvalidValue, err + } + out = append(out, nv) + } + + return dyn.NewValue(out, v.Locations()), nil +} + +// ElementsByKey returns a [dyn.MapFunc] that operates on a sequence +// where each element is a map. It groups elements by a key and merges +// elements with the same key. +// +// The function that extracts the key from an element is provided as +// a parameter. 
The resulting elements get their key field overwritten +// with the value as returned by the key function. +func ElementsByKey(key string, keyFunc func(dyn.Value) string) dyn.MapFunc { + return elementsByKey{key, keyFunc}.Map +} diff --git a/libs/dyn/merge/elements_by_key_test.go b/libs/dyn/merge/elements_by_key_test.go new file mode 100644 index 000000000..ef316cc66 --- /dev/null +++ b/libs/dyn/merge/elements_by_key_test.go @@ -0,0 +1,52 @@ +package merge + +import ( + "strings" + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" + "github.com/stretchr/testify/require" +) + +func TestElementByKey(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V(map[string]dyn.Value{ + "key": dyn.V("foo"), + "value": dyn.V(42), + }), + dyn.V(map[string]dyn.Value{ + "key": dyn.V("bar"), + "value": dyn.V(43), + }), + dyn.V(map[string]dyn.Value{ + // Use upper case key to test that the resulting element has its + // key field assigned to the output of the key function. + // The key function in this test returns the lower case version of the key. + "key": dyn.V("FOO"), + "value": dyn.V(44), + }), + }) + + keyFunc := func(v dyn.Value) string { + return strings.ToLower(v.MustString()) + } + + vout, err := dyn.MapByPath(vin, dyn.EmptyPath, ElementsByKey("key", keyFunc)) + require.NoError(t, err) + assert.Len(t, vout.MustSequence(), 2) + assert.Equal(t, + vout.Index(0).AsAny(), + map[string]any{ + "key": "foo", + "value": 44, + }, + ) + assert.Equal(t, + vout.Index(1).AsAny(), + map[string]any{ + "key": "bar", + "value": 43, + }, + ) +} diff --git a/libs/dyn/merge/merge.go b/libs/dyn/merge/merge.go index 1cadbea60..29decd779 100644 --- a/libs/dyn/merge/merge.go +++ b/libs/dyn/merge/merge.go @@ -12,6 +12,26 @@ import ( // * Merging x with nil or nil with x always yields x. // * Merging maps a and b means entries from map b take precedence. // * Merging sequences a and b means concatenating them. +// +// Merging retains and accumulates the locations metadata associated with the values. +// This allows users of the module to track the provenance of values across merging of +// configuration trees, which is useful for reporting errors and warnings. +// +// Semantics for location metadata in the merged value are similar to the semantics +// for the values themselves: +// +// - When merging x with nil or nil with x, the location of x is retained. +// +// - When merging maps or sequences, the combined value retains the location of a and +// accumulates the location of b. The individual elements of the map or sequence retain +// their original locations, i.e., whether they were originally defined in a or b. +// +// The rationale for retaining location of a is that we would like to return +// the first location a bit of configuration showed up when reporting errors and warnings. +// +// - Merging primitive values means using the incoming value `b`. The location of the +// incoming value is retained and the location of the existing value `a` is accumulated. +// This is because the incoming value overwrites the existing value. func Merge(a, b dyn.Value) (dyn.Value, error) { return merge(a, b) } @@ -22,61 +42,61 @@ func merge(a, b dyn.Value) (dyn.Value, error) { // If a is nil, return b. if ak == dyn.KindNil { - return b, nil + return b.AppendLocationsFromValue(a), nil } // If b is nil, return a. if bk == dyn.KindNil { - return a, nil + return a.AppendLocationsFromValue(b), nil } // Call the appropriate merge function based on the kind of a and b. 
switch ak { case dyn.KindMap: if bk != dyn.KindMap { - return dyn.NilValue, fmt.Errorf("cannot merge map with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge map with %s", bk) } return mergeMap(a, b) case dyn.KindSequence: if bk != dyn.KindSequence { - return dyn.NilValue, fmt.Errorf("cannot merge sequence with %s", bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge sequence with %s", bk) } return mergeSequence(a, b) default: if ak != bk { - return dyn.NilValue, fmt.Errorf("cannot merge %s with %s", ak, bk) + return dyn.InvalidValue, fmt.Errorf("cannot merge %s with %s", ak, bk) } return mergePrimitive(a, b) } } func mergeMap(a, b dyn.Value) (dyn.Value, error) { - out := make(map[string]dyn.Value) + out := dyn.NewMapping() am := a.MustMap() bm := b.MustMap() // Add the values from a into the output map. - for k, v := range am { - out[k] = v - } + out.Merge(am) // Merge the values from b into the output map. - for k, v := range bm { - if _, ok := out[k]; ok { + for _, pair := range bm.Pairs() { + pk := pair.Key + pv := pair.Value + if ov, ok := out.Get(pk); ok { // If the key already exists, merge the values. - merged, err := merge(out[k], v) + merged, err := merge(ov, pv) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } - out[k] = merged + out.Set(pk, merged) } else { // Otherwise, just set the value. - out[k] = v + out.Set(pk, pv) } } - // Preserve the location of the first value. - return dyn.NewValue(out, a.Location()), nil + // Preserve the location of the first value. Accumulate the locations of the second value. + return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } func mergeSequence(a, b dyn.Value) (dyn.Value, error) { @@ -88,11 +108,10 @@ func mergeSequence(a, b dyn.Value) (dyn.Value, error) { copy(out[:], as) copy(out[len(as):], bs) - // Preserve the location of the first value. - return dyn.NewValue(out, a.Location()), nil + // Preserve the location of the first value. Accumulate the locations of the second value. + return dyn.NewValue(out, a.Locations()).AppendLocationsFromValue(b), nil } - func mergePrimitive(a, b dyn.Value) (dyn.Value, error) { // Merging primitive values means using the incoming value. - return b, nil + return b.AppendLocationsFromValue(a), nil } diff --git a/libs/dyn/merge/merge_test.go b/libs/dyn/merge/merge_test.go index c4928e353..4a4bf9e6c 100644 --- a/libs/dyn/merge/merge_test.go +++ b/libs/dyn/merge/merge_test.go @@ -4,19 +4,21 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestMergeMaps(t *testing.T) { - v1 := dyn.V(map[string]dyn.Value{ - "foo": dyn.V("bar"), - "bar": dyn.V("baz"), - }) + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + v1 := dyn.NewValue(map[string]dyn.Value{ + "foo": dyn.NewValue("bar", []dyn.Location{l1}), + "bar": dyn.NewValue("baz", []dyn.Location{l1}), + }, []dyn.Location{l1}) - v2 := dyn.V(map[string]dyn.Value{ - "bar": dyn.V("qux"), - "qux": dyn.V("foo"), - }) + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + v2 := dyn.NewValue(map[string]dyn.Value{ + "bar": dyn.NewValue("qux", []dyn.Location{l2}), + "qux": dyn.NewValue("foo", []dyn.Location{l2}), + }, []dyn.Location{l2}) // Merge v2 into v1. { @@ -27,6 +29,23 @@ func TestMergeMaps(t *testing.T) { "bar": "qux", "qux": "foo", }, out.AsAny()) + + // Locations of both values should be preserved. 
+ assert.Equal(t, []dyn.Location{l1, l2}, out.Locations()) + assert.Equal(t, []dyn.Location{l2, l1}, out.Get("bar").Locations()) + assert.Equal(t, []dyn.Location{l1}, out.Get("foo").Locations()) + assert.Equal(t, []dyn.Location{l2}, out.Get("qux").Locations()) + + // Location of the merged value should be the location of v1. + assert.Equal(t, l1, out.Location()) + + // Value of bar is "qux" which comes from v2. This .Location() should + // return the location of v2. + assert.Equal(t, l2, out.Get("bar").Location()) + + // Original locations of keys that were not overwritten should be preserved. + assert.Equal(t, l1, out.Get("foo").Location()) + assert.Equal(t, l2, out.Get("qux").Location()) } // Merge v1 into v2. @@ -38,30 +57,64 @@ func TestMergeMaps(t *testing.T) { "bar": "baz", "qux": "foo", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l1}, out.Locations()) + assert.Equal(t, []dyn.Location{l1, l2}, out.Get("bar").Locations()) + assert.Equal(t, []dyn.Location{l1}, out.Get("foo").Locations()) + assert.Equal(t, []dyn.Location{l2}, out.Get("qux").Locations()) + + // Location of the merged value should be the location of v2. + assert.Equal(t, l2, out.Location()) + + // Value of bar is "baz" which comes from v1. This .Location() should + // return the location of v1. + assert.Equal(t, l1, out.Get("bar").Location()) + + // Original locations of keys that were not overwritten should be preserved. + assert.Equal(t, l1, out.Get("foo").Location()) + assert.Equal(t, l2, out.Get("qux").Location()) } + } func TestMergeMapsNil(t *testing.T) { - v := dyn.V(map[string]dyn.Value{ + l := dyn.Location{File: "file", Line: 1, Column: 2} + v := dyn.NewValue(map[string]dyn.Value{ "foo": dyn.V("bar"), - }) + }, []dyn.Location{l}) + + nilL := dyn.Location{File: "file", Line: 3, Column: 4} + nilV := dyn.NewValue(nil, []dyn.Location{nilL}) // Merge nil into v. { - out, err := Merge(v, dyn.NilValue) + out, err := Merge(v, nilV) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l, nilL}, out.Locations()) + + // Location of the non-nil value should be returned by .Location(). + assert.Equal(t, l, out.Location()) } // Merge v into nil. { - out, err := Merge(dyn.NilValue, v) + out, err := Merge(nilV, v) assert.NoError(t, err) assert.Equal(t, map[string]any{ "foo": "bar", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l, nilL}, out.Locations()) + + // Location of the non-nil value should be returned by .Location(). 
+ assert.Equal(t, l, out.Location()) } } @@ -76,20 +129,23 @@ func TestMergeMapsError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge map with string") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } func TestMergeSequences(t *testing.T) { - v1 := dyn.V([]dyn.Value{ - dyn.V("bar"), - dyn.V("baz"), - }) + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + v1 := dyn.NewValue([]dyn.Value{ + dyn.NewValue("bar", []dyn.Location{l1}), + dyn.NewValue("baz", []dyn.Location{l1}), + }, []dyn.Location{l1}) - v2 := dyn.V([]dyn.Value{ - dyn.V("qux"), - dyn.V("foo"), - }) + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + l3 := dyn.Location{File: "file3", Line: 5, Column: 6} + v2 := dyn.NewValue([]dyn.Value{ + dyn.NewValue("qux", []dyn.Location{l2}), + dyn.NewValue("foo", []dyn.Location{l3}), + }, []dyn.Location{l2, l3}) // Merge v2 into v1. { @@ -101,6 +157,18 @@ func TestMergeSequences(t *testing.T) { "qux", "foo", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l1, l2, l3}, out.Locations()) + + // Location of the merged value should be the location of v1. + assert.Equal(t, l1, out.Location()) + + // Location of the individual values should be preserved. + assert.Equal(t, l1, out.Index(0).Location()) // "bar" + assert.Equal(t, l1, out.Index(1).Location()) // "baz" + assert.Equal(t, l2, out.Index(2).Location()) // "qux" + assert.Equal(t, l3, out.Index(3).Location()) // "foo" } // Merge v1 into v2. @@ -113,6 +181,18 @@ func TestMergeSequences(t *testing.T) { "bar", "baz", }, out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l3, l1}, out.Locations()) + + // Location of the merged value should be the location of v2. + assert.Equal(t, l2, out.Location()) + + // Location of the individual values should be preserved. + assert.Equal(t, l2, out.Index(0).Location()) // "qux" + assert.Equal(t, l3, out.Index(1).Location()) // "foo" + assert.Equal(t, l1, out.Index(2).Location()) // "bar" + assert.Equal(t, l1, out.Index(3).Location()) // "baz" } } @@ -151,19 +231,27 @@ func TestMergeSequencesError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge sequence with string") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } func TestMergePrimitives(t *testing.T) { - v1 := dyn.V("bar") - v2 := dyn.V("baz") + l1 := dyn.Location{File: "file1", Line: 1, Column: 2} + l2 := dyn.Location{File: "file2", Line: 3, Column: 4} + v1 := dyn.NewValue("bar", []dyn.Location{l1}) + v2 := dyn.NewValue("baz", []dyn.Location{l2}) // Merge v2 into v1. { out, err := Merge(v1, v2) assert.NoError(t, err) assert.Equal(t, "baz", out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l2, l1}, out.Locations()) + + // Location of the merged value should be the location of v2, the second value. + assert.Equal(t, l2, out.Location()) } // Merge v1 into v2. @@ -171,6 +259,12 @@ func TestMergePrimitives(t *testing.T) { out, err := Merge(v2, v1) assert.NoError(t, err) assert.Equal(t, "bar", out.AsAny()) + + // Locations of both values should be preserved. + assert.Equal(t, []dyn.Location{l1, l2}, out.Locations()) + + // Location of the merged value should be the location of v1, the second value. 
+ assert.Equal(t, l1, out.Location()) } } @@ -202,6 +296,6 @@ func TestMergePrimitivesError(t *testing.T) { { out, err := Merge(v, other) assert.EqualError(t, err, "cannot merge string with map") - assert.Equal(t, dyn.NilValue, out) + assert.Equal(t, dyn.InvalidValue, out) } } diff --git a/libs/dyn/merge/override.go b/libs/dyn/merge/override.go new file mode 100644 index 000000000..7a8667cd6 --- /dev/null +++ b/libs/dyn/merge/override.go @@ -0,0 +1,211 @@ +package merge + +import ( + "errors" + "fmt" + + "github.com/databricks/cli/libs/dyn" +) + +// OverrideVisitor is visiting the changes during the override process +// and allows to control what changes are allowed, or update the effective +// value. +// +// For instance, it can disallow changes outside the specific path(s), or update +// the location of the effective value. +// +// Values returned by 'VisitInsert' and 'VisitUpdate' are used as the final value +// of the node. 'VisitDelete' can return ErrOverrideUndoDelete to undo delete. +// +// 'VisitDelete' is called when a value is removed from mapping or sequence +// 'VisitInsert' is called when a new value is added to mapping or sequence +// 'VisitUpdate' is called when a leaf value is updated +type OverrideVisitor struct { + VisitDelete func(valuePath dyn.Path, left dyn.Value) error + VisitInsert func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) + VisitUpdate func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) +} + +var ErrOverrideUndoDelete = errors.New("undo delete operation") + +// Override overrides value 'leftRoot' with 'rightRoot', keeping 'location' if values +// haven't changed. Preserving 'location' is important to preserve the original source of the value +// for error reporting. +func Override(leftRoot dyn.Value, rightRoot dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { + return override(dyn.EmptyPath, leftRoot, rightRoot, visitor) +} + +func override(basePath dyn.Path, left dyn.Value, right dyn.Value, visitor OverrideVisitor) (dyn.Value, error) { + if left.Kind() != right.Kind() { + return visitor.VisitUpdate(basePath, left, right) + } + + // NB: we only call 'VisitUpdate' on leaf values, and for sequences and mappings + // we don't know if value was updated or not + + switch left.Kind() { + case dyn.KindMap: + merged, err := overrideMapping(basePath, left.MustMap(), right.MustMap(), visitor) + + if err != nil { + return dyn.InvalidValue, err + } + + return dyn.NewValue(merged, left.Locations()), nil + + case dyn.KindSequence: + // some sequences are keyed, and we can detect which elements are added/removed/updated, + // but we don't have this information + merged, err := overrideSequence(basePath, left.MustSequence(), right.MustSequence(), visitor) + + if err != nil { + return dyn.InvalidValue, err + } + + return dyn.NewValue(merged, left.Locations()), nil + + case dyn.KindString: + if left.MustString() == right.MustString() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindFloat: + // TODO consider comparison with epsilon if normalization doesn't help, where do we use floats? 
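	// A tolerance-based comparison, if ever needed, could look roughly like
	// math.Abs(left.MustFloat()-right.MustFloat()) < eps for some hypothetical eps;
	// strict equality is used for now.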
+ + if left.MustFloat() == right.MustFloat() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindBool: + if left.MustBool() == right.MustBool() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindTime: + if left.MustTime() == right.MustTime() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + + case dyn.KindInt: + if left.MustInt() == right.MustInt() { + return left, nil + } else { + return visitor.VisitUpdate(basePath, left, right) + } + case dyn.KindNil: + return left, nil + } + + return dyn.InvalidValue, fmt.Errorf("unexpected kind %s at %s", left.Kind(), basePath.String()) +} + +func overrideMapping(basePath dyn.Path, leftMapping dyn.Mapping, rightMapping dyn.Mapping, visitor OverrideVisitor) (dyn.Mapping, error) { + out := dyn.NewMapping() + + for _, leftPair := range leftMapping.Pairs() { + // detect if key was removed + if _, ok := rightMapping.GetPair(leftPair.Key); !ok { + path := basePath.Append(dyn.Key(leftPair.Key.MustString())) + + err := visitor.VisitDelete(path, leftPair.Value) + + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + err := out.Set(leftPair.Key, leftPair.Value) + if err != nil { + return dyn.NewMapping(), err + } + } else if err != nil { + return dyn.NewMapping(), err + } + } + } + + // iterating only right mapping will remove keys not present anymore + // and insert new keys + + for _, rightPair := range rightMapping.Pairs() { + if leftPair, ok := leftMapping.GetPair(rightPair.Key); ok { + path := basePath.Append(dyn.Key(rightPair.Key.MustString())) + newValue, err := override(path, leftPair.Value, rightPair.Value, visitor) + + if err != nil { + return dyn.NewMapping(), err + } + + // key was there before, so keep its location + err = out.Set(leftPair.Key, newValue) + + if err != nil { + return dyn.NewMapping(), err + } + } else { + path := basePath.Append(dyn.Key(rightPair.Key.MustString())) + + newValue, err := visitor.VisitInsert(path, rightPair.Value) + + if err != nil { + return dyn.NewMapping(), err + } + + err = out.Set(rightPair.Key, newValue) + + if err != nil { + return dyn.NewMapping(), err + } + } + } + + return out, nil +} + +func overrideSequence(basePath dyn.Path, left []dyn.Value, right []dyn.Value, visitor OverrideVisitor) ([]dyn.Value, error) { + minLen := min(len(left), len(right)) + var values []dyn.Value + + for i := 0; i < minLen; i++ { + path := basePath.Append(dyn.Index(i)) + merged, err := override(path, left[i], right[i], visitor) + + if err != nil { + return nil, err + } + + values = append(values, merged) + } + + if len(right) > len(left) { + for i := minLen; i < len(right); i++ { + path := basePath.Append(dyn.Index(i)) + newValue, err := visitor.VisitInsert(path, right[i]) + + if err != nil { + return nil, err + } + + values = append(values, newValue) + } + } else if len(left) > len(right) { + for i := minLen; i < len(left); i++ { + path := basePath.Append(dyn.Index(i)) + err := visitor.VisitDelete(path, left[i]) + + // if 'delete' was undone, add it back + if errors.Is(err, ErrOverrideUndoDelete) { + values = append(values, left[i]) + } else if err != nil { + return nil, err + } + } + } + + return values, nil +} diff --git a/libs/dyn/merge/override_test.go b/libs/dyn/merge/override_test.go new file mode 100644 index 000000000..9d41a526e --- /dev/null +++ b/libs/dyn/merge/override_test.go @@ -0,0 +1,519 @@ +package merge + +import ( + "fmt" + "testing" + 
"time" + + "github.com/stretchr/testify/require" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +type overrideTestCase struct { + name string + left dyn.Value + right dyn.Value + state visitorState + expected dyn.Value +} + +func TestOverride_Primitive(t *testing.T) { + leftLocation := dyn.Location{File: "left.yml", Line: 1, Column: 1} + rightLocation := dyn.Location{File: "right.yml", Line: 1, Column: 1} + + modifiedTestCases := []overrideTestCase{ + { + name: "string (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue("b", []dyn.Location{rightLocation}), + expected: dyn.NewValue("b", []dyn.Location{rightLocation}), + }, + { + name: "string (not updated)", + state: visitorState{}, + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue("a", []dyn.Location{rightLocation}), + expected: dyn.NewValue("a", []dyn.Location{leftLocation}), + }, + { + name: "bool (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(true, []dyn.Location{leftLocation}), + right: dyn.NewValue(false, []dyn.Location{rightLocation}), + expected: dyn.NewValue(false, []dyn.Location{rightLocation}), + }, + { + name: "bool (not updated)", + state: visitorState{}, + left: dyn.NewValue(true, []dyn.Location{leftLocation}), + right: dyn.NewValue(true, []dyn.Location{rightLocation}), + expected: dyn.NewValue(true, []dyn.Location{leftLocation}), + }, + { + name: "int (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(1, []dyn.Location{leftLocation}), + right: dyn.NewValue(2, []dyn.Location{rightLocation}), + expected: dyn.NewValue(2, []dyn.Location{rightLocation}), + }, + { + name: "int (not updated)", + state: visitorState{}, + left: dyn.NewValue(int32(1), []dyn.Location{leftLocation}), + right: dyn.NewValue(int64(1), []dyn.Location{rightLocation}), + expected: dyn.NewValue(int32(1), []dyn.Location{leftLocation}), + }, + { + name: "float (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(1.0, []dyn.Location{leftLocation}), + right: dyn.NewValue(2.0, []dyn.Location{rightLocation}), + expected: dyn.NewValue(2.0, []dyn.Location{rightLocation}), + }, + { + name: "float (not updated)", + state: visitorState{}, + left: dyn.NewValue(float32(1.0), []dyn.Location{leftLocation}), + right: dyn.NewValue(float64(1.0), []dyn.Location{rightLocation}), + expected: dyn.NewValue(float32(1.0), []dyn.Location{leftLocation}), + }, + { + name: "time (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), + right: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}), + expected: dyn.NewValue(time.UnixMilli(10001), []dyn.Location{rightLocation}), + }, + { + name: "time (not updated)", + state: visitorState{}, + left: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), + right: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{rightLocation}), + expected: dyn.NewValue(time.UnixMilli(10000), []dyn.Location{leftLocation}), + }, + { + name: "different types (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue("a", []dyn.Location{leftLocation}), + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + { + name: "map - remove 'a', update 'b'", + state: visitorState{ + removed: []string{"root.a"}, 
+ updated: []string{"root.b"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, []dyn.Location{leftLocation}), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}), + + right: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(20, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}), + + expected: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(20, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}), + }, + { + name: "map - add 'a'", + state: visitorState{ + added: []string{"root.a"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + + right: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, []dyn.Location{rightLocation}), + "b": dyn.NewValue(10, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}, + ), + + expected: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, []dyn.Location{rightLocation}), + // location hasn't changed because value hasn't changed + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "map - remove 'a'", + state: visitorState{ + removed: []string{"root.a"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "a": dyn.NewValue(42, []dyn.Location{leftLocation}), + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + + right: dyn.NewValue( + map[string]dyn.Value{ + "b": dyn.NewValue(10, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}, + ), + + expected: dyn.NewValue( + map[string]dyn.Value{ + // location hasn't changed because value hasn't changed + "b": dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "map - add 'jobs.job_1'", + state: visitorState{ + added: []string{"root.jobs.job_1"}, + }, + left: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + []dyn.Location{leftLocation}, + ), + + right: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{rightLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}, + ), + }, + []dyn.Location{rightLocation}, + ), + + expected: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "map - remove nested key", + state: visitorState{removed: []string{"root.jobs.job_1"}}, + left: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + "job_1": dyn.NewValue(1337, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + []dyn.Location{leftLocation}, + ), + right: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}, + ), + }, + []dyn.Location{rightLocation}, + ), + expected: dyn.NewValue( + map[string]dyn.Value{ + "jobs": dyn.NewValue( + 
map[string]dyn.Value{ + "job_0": dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "sequence - add", + state: visitorState{added: []string{"root[1]"}}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{rightLocation}), + dyn.NewValue(10, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}, + ), + expected: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + dyn.NewValue(10, []dyn.Location{rightLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "sequence - remove", + state: visitorState{removed: []string{"root[1]"}}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + dyn.NewValue(10, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}, + ), + expected: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + // location hasn't changed because value hasn't changed + }, + { + name: "sequence (not updated)", + state: visitorState{}, + left: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + right: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + []dyn.Location{rightLocation}, + ), + expected: dyn.NewValue( + []dyn.Value{ + dyn.NewValue(42, []dyn.Location{leftLocation}), + }, + []dyn.Location{leftLocation}, + ), + }, + { + name: "nil (not updated)", + state: visitorState{}, + left: dyn.NilValue.WithLocations([]dyn.Location{leftLocation}), + right: dyn.NilValue.WithLocations([]dyn.Location{rightLocation}), + expected: dyn.NilValue.WithLocations([]dyn.Location{leftLocation}), + }, + { + name: "nil (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NilValue, + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + { + name: "change kind (updated)", + state: visitorState{updated: []string{"root"}}, + left: dyn.NewValue(42.0, []dyn.Location{leftLocation}), + right: dyn.NewValue(42, []dyn.Location{rightLocation}), + expected: dyn.NewValue(42, []dyn.Location{rightLocation}), + }, + } + + for _, tc := range modifiedTestCases { + t.Run(tc.name, func(t *testing.T) { + s, visitor := createVisitor(visitorOpts{}) + out, err := override(dyn.NewPath(dyn.Key("root")), tc.left, tc.right, visitor) + + assert.NoError(t, err) + assert.Equal(t, tc.state, *s) + assert.Equal(t, tc.expected, out) + }) + + modified := len(tc.state.removed)+len(tc.state.added)+len(tc.state.updated) > 0 + + // visitor is not used unless there is a change + + if modified { + t.Run(tc.name+" - visitor has error", func(t *testing.T) { + _, visitor := createVisitor(visitorOpts{error: fmt.Errorf("unexpected change in test")}) + _, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + + assert.EqualError(t, err, "unexpected change in test") + }) + + t.Run(tc.name+" - visitor overrides value", func(t *testing.T) { + expected := dyn.V("return value") + s, visitor := createVisitor(visitorOpts{returnValue: &expected}) + out, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + + 
assert.NoError(t, err) + + for _, added := range s.added { + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(added)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + + for _, updated := range s.updated { + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(updated)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + }) + + if len(tc.state.removed) > 0 { + t.Run(tc.name+" - visitor can undo delete", func(t *testing.T) { + s, visitor := createVisitor(visitorOpts{deleteError: ErrOverrideUndoDelete}) + out, err := override(dyn.EmptyPath, tc.left, tc.right, visitor) + require.NoError(t, err) + + for _, removed := range s.removed { + expected, err := dyn.GetByPath(tc.left, dyn.MustPathFromString(removed)) + require.NoError(t, err) + + actual, err := dyn.GetByPath(out, dyn.MustPathFromString(removed)) + + assert.NoError(t, err) + assert.Equal(t, expected, actual) + } + }) + } + } + } +} + +func TestOverride_PreserveMappingKeys(t *testing.T) { + leftLocation := dyn.Location{File: "left.yml", Line: 1, Column: 1} + leftKeyLocation := dyn.Location{File: "left.yml", Line: 2, Column: 1} + leftValueLocation := dyn.Location{File: "left.yml", Line: 3, Column: 1} + + rightLocation := dyn.Location{File: "right.yml", Line: 1, Column: 1} + rightKeyLocation := dyn.Location{File: "right.yml", Line: 2, Column: 1} + rightValueLocation := dyn.Location{File: "right.yml", Line: 3, Column: 1} + + left := dyn.NewMapping() + left.Set(dyn.NewValue("a", []dyn.Location{leftKeyLocation}), dyn.NewValue(42, []dyn.Location{leftValueLocation})) + + right := dyn.NewMapping() + right.Set(dyn.NewValue("a", []dyn.Location{rightKeyLocation}), dyn.NewValue(7, []dyn.Location{rightValueLocation})) + + state, visitor := createVisitor(visitorOpts{}) + + out, err := override( + dyn.EmptyPath, + dyn.NewValue(left, []dyn.Location{leftLocation}), + dyn.NewValue(right, []dyn.Location{rightLocation}), + visitor, + ) + + assert.NoError(t, err) + + if err != nil { + outPairs := out.MustMap().Pairs() + + assert.Equal(t, visitorState{updated: []string{"a"}}, state) + assert.Equal(t, 1, len(outPairs)) + + // mapping was first defined in left, so it should keep its location + assert.Equal(t, leftLocation, out.Location()) + + // if there is a validation error for key value, it should point + // to where it was initially defined + assert.Equal(t, leftKeyLocation, outPairs[0].Key.Location()) + + // the value should have updated location, because it has changed + assert.Equal(t, rightValueLocation, outPairs[0].Value.Location()) + } +} + +type visitorState struct { + added []string + removed []string + updated []string +} + +type visitorOpts struct { + error error + deleteError error + returnValue *dyn.Value +} + +func createVisitor(opts visitorOpts) (*visitorState, OverrideVisitor) { + s := visitorState{} + + return &s, OverrideVisitor{ + VisitUpdate: func(valuePath dyn.Path, left dyn.Value, right dyn.Value) (dyn.Value, error) { + s.updated = append(s.updated, valuePath.String()) + + if opts.error != nil { + return dyn.NilValue, opts.error + } else if opts.returnValue != nil { + return *opts.returnValue, nil + } else { + return right, nil + } + }, + VisitDelete: func(valuePath dyn.Path, left dyn.Value) error { + s.removed = append(s.removed, valuePath.String()) + + if opts.error != nil { + return opts.error + } else if opts.deleteError != nil { + return opts.deleteError + } else { + return nil + } + }, + VisitInsert: func(valuePath dyn.Path, right dyn.Value) (dyn.Value, error) { + s.added = 
append(s.added, valuePath.String()) + + if opts.error != nil { + return dyn.NilValue, opts.error + } else if opts.returnValue != nil { + return *opts.returnValue, nil + } else { + return right, nil + } + }, + } +} diff --git a/libs/dyn/path.go b/libs/dyn/path.go index 34285de14..76377e2dc 100644 --- a/libs/dyn/path.go +++ b/libs/dyn/path.go @@ -10,6 +10,14 @@ type pathComponent struct { index int } +func (c pathComponent) Key() string { + return c.key +} + +func (c pathComponent) Index() int { + return c.index +} + func (c pathComponent) isKey() bool { return c.key != "" } @@ -41,17 +49,13 @@ func NewPath(cs ...pathComponent) Path { return cs } -// Join joins the given paths. -func (p Path) Join(qs ...Path) Path { - for _, q := range qs { - p = p.Append(q...) - } - return p -} - // Append appends the given components to the path. +// Mutations to the returned path do not affect the original path. func (p Path) Append(cs ...pathComponent) Path { - return append(p, cs...) + out := make(Path, len(p)+len(cs)) + copy(out, p) + copy(out[len(p):], cs) + return out } // Equal returns true if the paths are equal. diff --git a/libs/dyn/path_string_test.go b/libs/dyn/path_string_test.go index 9af394c6f..0d64bf110 100644 --- a/libs/dyn/path_string_test.go +++ b/libs/dyn/path_string_test.go @@ -5,7 +5,7 @@ import ( "testing" . "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestNewPathFromString(t *testing.T) { diff --git a/libs/dyn/path_test.go b/libs/dyn/path_test.go index c4ea26c4a..44df2050b 100644 --- a/libs/dyn/path_test.go +++ b/libs/dyn/path_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestPathAppend(t *testing.T) { @@ -19,16 +19,14 @@ func TestPathAppend(t *testing.T) { assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) } -func TestPathJoin(t *testing.T) { - p := dyn.NewPath(dyn.Key("foo")) +func TestPathAppendAlwaysNew(t *testing.T) { + p := make(dyn.Path, 0, 2) + p = append(p, dyn.Key("foo")) - // Single arg. - p1 := p.Join(dyn.NewPath(dyn.Key("bar"))) - assert.True(t, p1.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar")))) - - // Multiple args. - p2 := p.Join(dyn.NewPath(dyn.Key("bar")), dyn.NewPath(dyn.Index(1))) - assert.True(t, p2.Equal(dyn.NewPath(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)))) + // There is room for a second element in the slice. + p1 := p.Append(dyn.Index(1)) + p2 := p.Append(dyn.Index(2)) + assert.NotEqual(t, p1, p2) } func TestPathEqualEmpty(t *testing.T) { diff --git a/libs/dyn/pattern.go b/libs/dyn/pattern.go new file mode 100644 index 000000000..aecdc3ca6 --- /dev/null +++ b/libs/dyn/pattern.go @@ -0,0 +1,107 @@ +package dyn + +import ( + "fmt" + "slices" +) + +// Pattern represents a matcher for paths in a [Value] configuration tree. +// It is used by [MapByPattern] to apply a function to the values whose paths match the pattern. +// Every [Path] is a valid [Pattern] that matches a single unique path. +// The reverse is not true; not every [Pattern] is a valid [Path], as patterns may contain wildcards. +type Pattern []patternComponent + +// A pattern component can visit a [Value] and recursively call into [visit] for matching elements. +// Fixed components can match a single key or index, while wildcards can match any key or index. 
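// Illustrative sketch (not part of this change): building a Pattern and applying a
// function to every value it matches. It assumes a MapByPattern(Value, Pattern, MapFunc)
// helper with the shape implied by the doc comment above, and
// import "github.com/databricks/cli/libs/dyn".
func renameJobsExample(root dyn.Value) (dyn.Value, error) {
	// Matches resources.jobs.<any key>.name for every job in the tree.
	pat := dyn.NewPattern(
		dyn.Key("resources"),
		dyn.Key("jobs"),
		dyn.AnyKey(),
		dyn.Key("name"),
	)

	return dyn.MapByPattern(root, pat, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
		// p is the concrete path that matched, with wildcards resolved to real keys.
		return dyn.V("prefixed_" + v.MustString()), nil
	})
}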
+type patternComponent interface { + visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) +} + +// NewPattern returns a new pattern from the given components. +// The individual components may be created with [Key], [Index], or [Any]. +func NewPattern(cs ...patternComponent) Pattern { + return cs +} + +// NewPatternFromPath returns a new pattern from the given path. +func NewPatternFromPath(p Path) Pattern { + cs := make(Pattern, len(p)) + for i, c := range p { + cs[i] = c + } + return cs +} + +// Append appends the given components to the pattern. +func (p Pattern) Append(cs ...patternComponent) Pattern { + out := make(Pattern, len(p)+len(cs)) + copy(out, p) + copy(out[len(p):], cs) + return out +} + +type anyKeyComponent struct{} + +// AnyKey returns a pattern component that matches any key. +func AnyKey() patternComponent { + return anyKeyComponent{} +} + +// This function implements the patternComponent interface. +func (c anyKeyComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + m, ok := v.AsMap() + if !ok { + return InvalidValue, fmt.Errorf("expected a map at %q, found %s", prefix, v.Kind()) + } + + m = m.Clone() + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + + var err error + nv, err := visit(pv, append(prefix, Key(pk.MustString())), suffix, opts) + if err != nil { + // Leave the value intact if the suffix pattern didn't match any value. + if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + continue + } + return InvalidValue, err + } + + m.Set(pk, nv) + } + + return NewValue(m, v.Locations()), nil +} + +type anyIndexComponent struct{} + +// AnyIndex returns a pattern component that matches any index. +func AnyIndex() patternComponent { + return anyIndexComponent{} +} + +// This function implements the patternComponent interface. +func (c anyIndexComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + s, ok := v.AsSequence() + if !ok { + return InvalidValue, fmt.Errorf("expected a sequence at %q, found %s", prefix, v.Kind()) + } + + s = slices.Clone(s) + for i, value := range s { + var err error + nv, err := visit(value, append(prefix, Index(i)), suffix, opts) + if err != nil { + // Leave the value intact if the suffix pattern didn't match any value. + if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + continue + } + return InvalidValue, err + } + s[i] = nv + } + + return NewValue(s, v.Locations()), nil +} diff --git a/libs/dyn/pattern_test.go b/libs/dyn/pattern_test.go new file mode 100644 index 000000000..1b54953ef --- /dev/null +++ b/libs/dyn/pattern_test.go @@ -0,0 +1,50 @@ +package dyn_test + +import ( + "testing" + + "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" +) + +func TestNewPattern(t *testing.T) { + pat := dyn.NewPattern( + dyn.Key("foo"), + dyn.Index(1), + ) + + assert.Len(t, pat, 2) +} + +func TestNewPatternFromPath(t *testing.T) { + path := dyn.NewPath( + dyn.Key("foo"), + dyn.Index(1), + ) + + pat1 := dyn.NewPattern(dyn.Key("foo"), dyn.Index(1)) + pat2 := dyn.NewPatternFromPath(path) + assert.Equal(t, pat1, pat2) +} + +func TestPatternAppend(t *testing.T) { + p := dyn.NewPattern(dyn.Key("foo")) + + // Single arg. + p1 := p.Append(dyn.Key("bar")) + assert.Equal(t, dyn.NewPattern(dyn.Key("foo"), dyn.Key("bar")), p1) + + // Multiple args. 
+ p2 := p.Append(dyn.Key("bar"), dyn.Index(1)) + assert.Equal(t, dyn.NewPattern(dyn.Key("foo"), dyn.Key("bar"), dyn.Index(1)), p2) +} + +func TestPatternAppendAlwaysNew(t *testing.T) { + p := make(dyn.Pattern, 0, 2) + p = append(p, dyn.Key("foo")) + + // There is room for a second element in the slice. + p1 := p.Append(dyn.Index(1)) + p2 := p.Append(dyn.Index(2)) + assert.NotEqual(t, p1, p2) +} diff --git a/libs/dyn/value.go b/libs/dyn/value.go index a487e13e1..2aed2f6cd 100644 --- a/libs/dyn/value.go +++ b/libs/dyn/value.go @@ -2,13 +2,18 @@ package dyn import ( "fmt" + "slices" ) type Value struct { v any k Kind - l Location + + // List of locations this value is defined at. The first location in the slice + // is the location returned by the `.Location()` method and is typically used + // for reporting errors and warnings associated with the value. + l []Location // Whether or not this value is an anchor. // If this node doesn't map to a type, we don't need to warn about it. @@ -27,18 +32,43 @@ var NilValue = Value{ // V constructs a new Value with the given value. func V(v any) Value { - return Value{ - v: v, - k: kindOf(v), - } + return NewValue(v, []Location{}) } // NewValue constructs a new Value with the given value and location. -func NewValue(v any, loc Location) Value { +func NewValue(v any, loc []Location) Value { + switch vin := v.(type) { + case map[string]Value: + v = newMappingFromGoMap(vin) + } + return Value{ v: v, k: kindOf(v), - l: loc, + + // create a copy of the locations, so that mutations to the original slice + // don't affect new value. + l: slices.Clone(loc), + } +} + +// WithLocations returns a new Value with its location set to the given value. +func (v Value) WithLocations(loc []Location) Value { + return Value{ + v: v.v, + k: v.k, + + // create a copy of the locations, so that mutations to the original slice + // don't affect new value. + l: slices.Clone(loc), + } +} + +func (v Value) AppendLocationsFromValue(w Value) Value { + return Value{ + v: v.v, + k: v.k, + l: append(v.l, w.l...), } } @@ -50,10 +80,18 @@ func (v Value) Value() any { return v.v } -func (v Value) Location() Location { +func (v Value) Locations() []Location { return v.l } +func (v Value) Location() Location { + if len(v.l) == 0 { + return Location{} + } + + return v.l[0] +} + func (v Value) IsValid() bool { return v.k != KindInvalid } @@ -63,12 +101,14 @@ func (v Value) AsAny() any { case KindInvalid: panic("invoked AsAny on invalid value") case KindMap: - vv := v.v.(map[string]Value) - m := make(map[string]any, len(vv)) - for k, v := range vv { - m[k] = v.AsAny() + m := v.v.(Mapping) + out := make(map[string]any, m.Len()) + for _, pair := range m.pairs { + pk := pair.Key + pv := pair.Value + out[pk.MustString()] = pv.AsAny() } - return m + return out case KindSequence: vv := v.v.([]Value) a := make([]any, len(vv)) @@ -97,12 +137,12 @@ func (v Value) AsAny() any { func (v Value) Get(key string) Value { m, ok := v.AsMap() if !ok { - return NilValue + return InvalidValue } - vv, ok := m[key] + vv, ok := m.GetByString(key) if !ok { - return NilValue + return InvalidValue } return vv @@ -111,11 +151,11 @@ func (v Value) Get(key string) Value { func (v Value) Index(i int) Value { s, ok := v.v.([]Value) if !ok { - return NilValue + return InvalidValue } if i < 0 || i >= len(s) { - return NilValue + return InvalidValue } return s[i] @@ -140,7 +180,10 @@ func (v Value) IsAnchor() bool { // We need a custom implementation because maps and slices // cannot be compared with the regular == operator. 
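// Illustrative sketch (not part of this change): values now carry a list of locations.
// Location() returns the first (primary) one and AppendLocationsFromValue accumulates
// locations when two values are combined, mirroring what the merge package above does.
// Assumes import "github.com/databricks/cli/libs/dyn".
func locationsExample() {
	base := dyn.NewValue("small", []dyn.Location{{File: "base.yml", Line: 10, Column: 3}})
	override := dyn.NewValue("large", []dyn.Location{{File: "prod.yml", Line: 4, Column: 3}})

	// The overriding value wins, but the location of the base value is kept around
	// so diagnostics can point at every place the setting was defined.
	v := override.AppendLocationsFromValue(base)

	_ = v.Location()  // prod.yml:4:3 (primary location)
	_ = v.Locations() // prod.yml:4:3, then base.yml:10:3
}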
func (v Value) eq(w Value) bool { - if v.k != w.k || v.l != w.l { + if v.k != w.k { + return false + } + if !slices.Equal(v.l, w.l) { return false } @@ -150,11 +193,22 @@ func (v Value) eq(w Value) bool { // This is safe because we don't allow maps to be mutated. return &v.v == &w.v case KindSequence: - // Compare pointers to the underlying slice and slice length. - // This is safe because we don't allow slices to be mutated. vs := v.v.([]Value) ws := w.v.([]Value) - return &vs[0] == &ws[0] && len(vs) == len(ws) + lv := len(vs) + lw := len(ws) + // If both slices are empty, they are equal. + if lv == 0 && lw == 0 { + return true + } + // If they have different lengths, they are not equal. + if lv != lw { + return false + } + // They are both non-empty and have the same length. + // Compare pointers to the underlying slice. + // This is safe because we don't allow slices to be mutated. + return &vs[0] == &ws[0] default: return v.v == w.v } diff --git a/libs/dyn/value_test.go b/libs/dyn/value_test.go index 7c9a9d990..6a0a27b8d 100644 --- a/libs/dyn/value_test.go +++ b/libs/dyn/value_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestInvalidValue(t *testing.T) { @@ -22,29 +22,30 @@ func TestValueIsAnchor(t *testing.T) { func TestValueAsMap(t *testing.T) { var zeroValue dyn.Value - m, ok := zeroValue.AsMap() + _, ok := zeroValue.AsMap() assert.False(t, ok) - assert.Nil(t, m) - var intValue = dyn.NewValue(1, dyn.Location{}) - m, ok = intValue.AsMap() + var intValue = dyn.V(1) + _, ok = intValue.AsMap() assert.False(t, ok) - assert.Nil(t, m) var mapValue = dyn.NewValue( map[string]dyn.Value{ - "key": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key": dyn.NewValue( + "value", + []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) - m, ok = mapValue.AsMap() + + m, ok := mapValue.AsMap() assert.True(t, ok) - assert.Len(t, m, 1) + assert.Equal(t, 1, m.Len()) } func TestValueIsValid(t *testing.T) { var zeroValue dyn.Value assert.False(t, zeroValue.IsValid()) - var intValue = dyn.NewValue(1, dyn.Location{}) + var intValue = dyn.V(1) assert.True(t, intValue.IsValid()) } diff --git a/libs/dyn/value_underlying.go b/libs/dyn/value_underlying.go index c8c503790..2f0f26a1f 100644 --- a/libs/dyn/value_underlying.go +++ b/libs/dyn/value_underlying.go @@ -5,16 +5,16 @@ import ( "time" ) -// AsMap returns the underlying map if this value is a map, +// AsMap returns the underlying mapping if this value is a map, // the zero value and false otherwise. -func (v Value) AsMap() (map[string]Value, bool) { - vv, ok := v.v.(map[string]Value) +func (v Value) AsMap() (Mapping, bool) { + vv, ok := v.v.(Mapping) return vv, ok } -// MustMap returns the underlying map if this value is a map, +// MustMap returns the underlying mapping if this value is a map, // panics otherwise. 
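// Illustrative sketch (not part of this change): with AsMap/MustMap returning a
// Mapping, callers index through the Mapping API instead of a plain Go map.
// Hypothetical caller, assumes import "github.com/databricks/cli/libs/dyn".
func readName(v dyn.Value) (string, bool) {
	m, ok := v.AsMap()
	if !ok {
		return "", false
	}
	nv, ok := m.GetByString("name") // previously: m["name"]
	if !ok {
		return "", false
	}
	return nv.AsString()
}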
-func (v Value) MustMap() map[string]Value { +func (v Value) MustMap() Mapping { vv, ok := v.AsMap() if !ok || v.k != KindMap { panic(fmt.Sprintf("expected kind %s, got %s", KindMap, v.k)) diff --git a/libs/dyn/value_underlying_test.go b/libs/dyn/value_underlying_test.go index 17cb95941..e35cde582 100644 --- a/libs/dyn/value_underlying_test.go +++ b/libs/dyn/value_underlying_test.go @@ -5,50 +5,50 @@ import ( "time" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestValueUnderlyingMap(t *testing.T) { v := dyn.V( map[string]dyn.Value{ - "key": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key": dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, ) vv1, ok := v.AsMap() assert.True(t, ok) - _, ok = dyn.NilValue.AsMap() + _, ok = dyn.InvalidValue.AsMap() assert.False(t, ok) vv2 := v.MustMap() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind map, got nil", func() { - dyn.NilValue.MustMap() + assert.PanicsWithValue(t, "expected kind map, got invalid", func() { + dyn.InvalidValue.MustMap() }) } func TestValueUnderlyingSequence(t *testing.T) { v := dyn.V( []dyn.Value{ - dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, ) vv1, ok := v.AsSequence() assert.True(t, ok) - _, ok = dyn.NilValue.AsSequence() + _, ok = dyn.InvalidValue.AsSequence() assert.False(t, ok) vv2 := v.MustSequence() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind sequence, got nil", func() { - dyn.NilValue.MustSequence() + assert.PanicsWithValue(t, "expected kind sequence, got invalid", func() { + dyn.InvalidValue.MustSequence() }) } @@ -58,15 +58,15 @@ func TestValueUnderlyingString(t *testing.T) { vv1, ok := v.AsString() assert.True(t, ok) - _, ok = dyn.NilValue.AsString() + _, ok = dyn.InvalidValue.AsString() assert.False(t, ok) vv2 := v.MustString() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind string, got nil", func() { - dyn.NilValue.MustString() + assert.PanicsWithValue(t, "expected kind string, got invalid", func() { + dyn.InvalidValue.MustString() }) } @@ -76,15 +76,15 @@ func TestValueUnderlyingBool(t *testing.T) { vv1, ok := v.AsBool() assert.True(t, ok) - _, ok = dyn.NilValue.AsBool() + _, ok = dyn.InvalidValue.AsBool() assert.False(t, ok) vv2 := v.MustBool() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind bool, got nil", func() { - dyn.NilValue.MustBool() + assert.PanicsWithValue(t, "expected kind bool, got invalid", func() { + dyn.InvalidValue.MustBool() }) } @@ -94,15 +94,15 @@ func TestValueUnderlyingInt(t *testing.T) { vv1, ok := v.AsInt() assert.True(t, ok) - _, ok = dyn.NilValue.AsInt() + _, ok = dyn.InvalidValue.AsInt() assert.False(t, ok) vv2 := v.MustInt() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind int, got nil", func() { - dyn.NilValue.MustInt() + assert.PanicsWithValue(t, "expected kind int, got invalid", func() { + dyn.InvalidValue.MustInt() }) // Test int32 type specifically. @@ -124,15 +124,15 @@ func TestValueUnderlyingFloat(t *testing.T) { vv1, ok := v.AsFloat() assert.True(t, ok) - _, ok = dyn.NilValue.AsFloat() + _, ok = dyn.InvalidValue.AsFloat() assert.False(t, ok) vv2 := v.MustFloat() assert.Equal(t, vv1, vv2) // Test panic. 
- assert.PanicsWithValue(t, "expected kind float, got nil", func() { - dyn.NilValue.MustFloat() + assert.PanicsWithValue(t, "expected kind float, got invalid", func() { + dyn.InvalidValue.MustFloat() }) // Test float64 type specifically. @@ -148,14 +148,14 @@ func TestValueUnderlyingTime(t *testing.T) { vv1, ok := v.AsTime() assert.True(t, ok) - _, ok = dyn.NilValue.AsTime() + _, ok = dyn.InvalidValue.AsTime() assert.False(t, ok) vv2 := v.MustTime() assert.Equal(t, vv1, vv2) // Test panic. - assert.PanicsWithValue(t, "expected kind time, got nil", func() { - dyn.NilValue.MustTime() + assert.PanicsWithValue(t, "expected kind time, got invalid", func() { + dyn.InvalidValue.MustTime() }) } diff --git a/libs/dyn/visit.go b/libs/dyn/visit.go index 077fd51c5..4d3cf5014 100644 --- a/libs/dyn/visit.go +++ b/libs/dyn/visit.go @@ -3,10 +3,31 @@ package dyn import ( "errors" "fmt" - "maps" "slices" ) +// This error is returned if the path indicates that a map or sequence is expected, but the value is nil. +type cannotTraverseNilError struct { + p Path +} + +func (e cannotTraverseNilError) Error() string { + component := e.p[len(e.p)-1] + switch { + case component.isKey(): + return fmt.Sprintf("expected a map to index %q, found nil", e.p) + case component.isIndex(): + return fmt.Sprintf("expected a sequence to index %q, found nil", e.p) + default: + panic("invalid component") + } +} + +func IsCannotTraverseNilError(err error) bool { + var target cannotTraverseNilError + return errors.As(err, &target) +} + type noSuchKeyError struct { p Path } @@ -44,16 +65,12 @@ type visitOptions struct { // // If this function returns an error, the original visit function call // returns this error and the value is left unmodified. - fn func(Value) (Value, error) - - // If set, tolerate the absence of the last component in the path. - // This option is needed to set a key in a map that is not yet present. - allowMissingKeyInMap bool + fn func(Path, Value) (Value, error) } -func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { +func visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { if len(suffix) == 0 { - return opts.fn(v) + return opts.fn(prefix, v) } // Initialize prefix if it is empty. @@ -63,25 +80,37 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { } component := suffix[0] - prefix = prefix.Append(component) suffix = suffix[1:] + // Visit the value with the current component. + return component.visit(v, prefix, suffix, opts) +} + +func (component pathComponent) visit(v Value, prefix Path, suffix Pattern, opts visitOptions) (Value, error) { + path := append(prefix, component) + switch { case component.isKey(): // Expect a map to be set if this is a key. - m, ok := v.AsMap() - if !ok { - return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", prefix, v.Kind()) + switch v.Kind() { + case KindMap: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: + return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) } + m := v.MustMap() + // Lookup current value in the map. - ev, ok := m[component.key] - if !ok && !opts.allowMissingKeyInMap { - return InvalidValue, noSuchKeyError{prefix} + ev, ok := m.GetByString(component.key) + if !ok { + return InvalidValue, noSuchKeyError{path} } // Recursively transform the value. 
- nv, err := visit(ev, prefix, suffix, opts) + nv, err := visit(ev, path, suffix, opts) if err != nil { return InvalidValue, err } @@ -92,8 +121,8 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { } // Return an updated map value. - m = maps.Clone(m) - m[component.key] = nv + m = m.Clone() + m.Set(V(component.key), nv) return Value{ v: m, k: KindMap, @@ -102,19 +131,25 @@ func visit(v Value, prefix, suffix Path, opts visitOptions) (Value, error) { case component.isIndex(): // Expect a sequence to be set if this is an index. - s, ok := v.AsSequence() - if !ok { - return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", prefix, v.Kind()) + switch v.Kind() { + case KindSequence: + // OK + case KindNil: + return InvalidValue, cannotTraverseNilError{path} + default: + return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) } + s := v.MustSequence() + // Lookup current value in the sequence. if component.index < 0 || component.index >= len(s) { - return InvalidValue, indexOutOfBoundsError{prefix} + return InvalidValue, indexOutOfBoundsError{path} } // Recursively transform the value. ev := s[component.index] - nv, err := visit(ev, prefix, suffix, opts) + nv, err := visit(ev, path, suffix, opts) if err != nil { return InvalidValue, err } diff --git a/libs/dyn/visit_get.go b/libs/dyn/visit_get.go index a0f848cdd..101c38aff 100644 --- a/libs/dyn/visit_get.go +++ b/libs/dyn/visit_get.go @@ -14,8 +14,8 @@ func Get(v Value, path string) (Value, error) { // If the path doesn't exist, it returns InvalidValue and an error. func GetByPath(v Value, p Path) (Value, error) { out := InvalidValue - _, err := visit(v, EmptyPath, p, visitOptions{ - fn: func(ev Value) (Value, error) { + _, err := visit(v, EmptyPath, NewPatternFromPath(p), visitOptions{ + fn: func(_ Path, ev Value) (Value, error) { // Capture the value argument to return it. out = ev return ev, nil diff --git a/libs/dyn/visit_get_test.go b/libs/dyn/visit_get_test.go index 22dce0858..adc307794 100644 --- a/libs/dyn/visit_get_test.go +++ b/libs/dyn/visit_get_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestGetWithEmptyPath(t *testing.T) { diff --git a/libs/dyn/visit_map.go b/libs/dyn/visit_map.go index ed89baa4a..cd2cd4831 100644 --- a/libs/dyn/visit_map.go +++ b/libs/dyn/visit_map.go @@ -2,45 +2,49 @@ package dyn import ( "fmt" - "maps" "slices" ) // MapFunc is a function that maps a value to another value. -type MapFunc func(Value) (Value, error) +type MapFunc func(Path, Value) (Value, error) // Foreach returns a [MapFunc] that applies the specified [MapFunc] to each // value in a map or sequence and returns the new map or sequence. +// If the input is nil, it returns nil. 
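A minimal usage sketch of the new path-aware callback signature (it mirrors the updated tests further down; vin is a placeholder value): Map composed with Foreach applies the callback to every element directly under the given path, and the callback now also receives each element's path:

    vout, err := dyn.Map(vin, ".", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
        i, ok := v.AsInt()
        if !ok {
            return dyn.InvalidValue, fmt.Errorf("expected an integer at %q", p)
        }
        return dyn.V(int(i) + 1), nil
    }))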
func Foreach(fn MapFunc) MapFunc { - return func(v Value) (Value, error) { + return func(p Path, v Value) (Value, error) { switch v.Kind() { + case KindNil: + return v, nil case KindMap: - m := maps.Clone(v.MustMap()) - for key, value := range m { - var err error - m[key], err = fn(value) + m := v.MustMap().Clone() + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + nv, err := fn(append(p, Key(pk.MustString())), pv) if err != nil { return InvalidValue, err } + m.Set(pk, nv) } - return NewValue(m, v.Location()), nil + return NewValue(m, v.Locations()), nil case KindSequence: s := slices.Clone(v.MustSequence()) for i, value := range s { var err error - s[i], err = fn(value) + s[i], err = fn(append(p, Index(i)), value) if err != nil { return InvalidValue, err } } - return NewValue(s, v.Location()), nil + return NewValue(s, v.Locations()), nil default: return InvalidValue, fmt.Errorf("expected a map or sequence, found %s", v.Kind()) } } } -// Map applies the given function to the value at the specified path in the specified value. +// Map applies a function to the value at the given path in the given value. // It is identical to [MapByPath], except that it takes a string path instead of a [Path]. func Map(v Value, path string, fn MapFunc) (Value, error) { p, err := NewPathFromString(path) @@ -50,15 +54,21 @@ func Map(v Value, path string, fn MapFunc) (Value, error) { return MapByPath(v, p, fn) } -// Map applies the given function to the value at the specified path in the specified value. +// MapByPath applies a function to the value at the given path in the given value. +// It is identical to [MapByPattern], except that it takes a [Path] instead of a [Pattern]. +// This means it only matches a single value, not a pattern of values. +func MapByPath(v Value, p Path, fn MapFunc) (Value, error) { + return MapByPattern(v, NewPatternFromPath(p), fn) +} + +// MapByPattern applies a function to the values whose paths match the given pattern in the given value. // If successful, it returns the new value with all intermediate values copied and updated. // -// If the path contains a key that doesn't exist, or an index that is out of bounds, -// it returns the original value and no error. This is because setting a value at a path -// that doesn't exist is a no-op. +// If the pattern contains a key that doesn't exist, or an index that is out of bounds, +// it returns the original value and no error. // -// If the path is invalid for the given value, it returns InvalidValue and an error. -func MapByPath(v Value, p Path, fn MapFunc) (Value, error) { +// If the pattern is invalid for the given value, it returns InvalidValue and an error. +func MapByPattern(v Value, p Pattern, fn MapFunc) (Value, error) { nv, err := visit(v, EmptyPath, p, visitOptions{ fn: fn, }) @@ -68,8 +78,10 @@ func MapByPath(v Value, p Path, fn MapFunc) (Value, error) { return nv, nil } - // Return original value if a key or index is missing. 
- if IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { + // Return original value if: + // - any map or sequence is a nil, or + // - a key or index is missing + if IsCannotTraverseNilError(err) || IsNoSuchKeyError(err) || IsIndexOutOfBoundsError(err) { return v, nil } diff --git a/libs/dyn/visit_map_test.go b/libs/dyn/visit_map_test.go index a5af3411f..2cea0913b 100644 --- a/libs/dyn/visit_map_test.go +++ b/libs/dyn/visit_map_test.go @@ -5,14 +5,14 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) func TestMapWithEmptyPath(t *testing.T) { // An empty path means to return the value itself. vin := dyn.V(42) - vout, err := dyn.MapByPath(dyn.InvalidValue, dyn.EmptyPath, func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.MapByPath(dyn.InvalidValue, dyn.EmptyPath, func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return vin, nil }) assert.NoError(t, err) @@ -20,11 +20,14 @@ func TestMapWithEmptyPath(t *testing.T) { } func TestMapOnNilValue(t *testing.T) { + var nv dyn.Value var err error - _, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil) - assert.ErrorContains(t, err, `expected a map to index "foo", found nil`) - _, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil) - assert.ErrorContains(t, err, `expected a sequence to index "[42]", found nil`) + nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Key("foo")), nil) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) + nv, err = dyn.MapByPath(dyn.NilValue, dyn.NewPath(dyn.Index(42)), nil) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, nv) } func TestMapFuncOnMap(t *testing.T) { @@ -45,7 +48,8 @@ func TestMapFuncOnMap(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. - vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(v dyn.Value) (dyn.Value, error) { + vfoo, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("foo")), p) assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) @@ -55,7 +59,8 @@ func TestMapFuncOnMap(t *testing.T) { "bar": 43, }, vfoo.AsAny()) - vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(v dyn.Value) (dyn.Value, error) { + vbar, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("bar")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("bar")), p) assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) @@ -67,13 +72,36 @@ func TestMapFuncOnMap(t *testing.T) { // Return error from map function. 
var ref = fmt.Errorf("error") - verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(v dyn.Value) (dyn.Value, error) { + verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("foo")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) assert.Equal(t, dyn.InvalidValue, verr) assert.ErrorIs(t, err, ref) } +func TestMapFuncOnMapWithEmptySequence(t *testing.T) { + variants := []dyn.Value{ + // empty sequence + dyn.V([]dyn.Value{}), + // non-empty sequence + dyn.V([]dyn.Value{dyn.V(42)}), + } + + for i := 0; i < len(variants); i++ { + vin := dyn.V(map[string]dyn.Value{ + "key": variants[i], + }) + + for j := 0; j < len(variants); j++ { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Key("key")), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return variants[j], nil + }) + assert.NoError(t, err) + assert.Equal(t, variants[j], vout.Get("key")) + } + } +} + func TestMapFuncOnSequence(t *testing.T) { vin := dyn.V([]dyn.Value{ dyn.V(42), @@ -92,14 +120,16 @@ func TestMapFuncOnSequence(t *testing.T) { // Note: in the test cases below we implicitly test that the original // value is not modified as we repeatedly set values on it. - v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + v0, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(0)), p) assert.Equal(t, dyn.V(42), v) return dyn.V(44), nil }) assert.NoError(t, err) assert.Equal(t, []any{44, 43}, v0.AsAny()) - v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(v dyn.Value) (dyn.Value, error) { + v1, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(1)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(1)), p) assert.Equal(t, dyn.V(43), v) return dyn.V(45), nil }) @@ -108,13 +138,36 @@ func TestMapFuncOnSequence(t *testing.T) { // Return error from map function. var ref = fmt.Errorf("error") - verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(v dyn.Value) (dyn.Value, error) { + verr, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref }) assert.Equal(t, dyn.InvalidValue, verr) assert.ErrorIs(t, err, ref) } +func TestMapFuncOnSequenceWithEmptySequence(t *testing.T) { + variants := []dyn.Value{ + // empty sequence + dyn.V([]dyn.Value{}), + // non-empty sequence + dyn.V([]dyn.Value{dyn.V(42)}), + } + + for i := 0; i < len(variants); i++ { + vin := dyn.V([]dyn.Value{ + variants[i], + }) + + for j := 0; j < len(variants); j++ { + vout, err := dyn.MapByPath(vin, dyn.NewPath(dyn.Index(0)), func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return variants[j], nil + }) + assert.NoError(t, err) + assert.Equal(t, variants[j], vout.Index(0)) + } + } +} + func TestMapForeachOnMap(t *testing.T) { vin := dyn.V(map[string]dyn.Value{ "foo": dyn.V(42), @@ -124,10 +177,19 @@ func TestMapForeachOnMap(t *testing.T) { var err error // Run foreach, adding 1 to each of the elements. 
- vout, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) { i, ok := v.AsInt() require.True(t, ok, "expected an integer") - return dyn.V(int(i) + 1), nil + switch p[0].Key() { + case "foo": + assert.EqualValues(t, 42, i) + return dyn.V(43), nil + case "bar": + assert.EqualValues(t, 43, i) + return dyn.V(44), nil + default: + return dyn.InvalidValue, fmt.Errorf("unexpected key %q", p[0].Key()) + } })) assert.NoError(t, err) assert.Equal(t, map[string]any{ @@ -150,7 +212,7 @@ func TestMapForeachOnMapError(t *testing.T) { // Check that an error from the map function propagates. var ref = fmt.Errorf("error") - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) assert.ErrorIs(t, err, ref) @@ -165,10 +227,19 @@ func TestMapForeachOnSequence(t *testing.T) { var err error // Run foreach, adding 1 to each of the elements. - vout, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(p dyn.Path, v dyn.Value) (dyn.Value, error) { i, ok := v.AsInt() require.True(t, ok, "expected an integer") - return dyn.V(int(i) + 1), nil + switch p[0].Index() { + case 0: + assert.EqualValues(t, 42, i) + return dyn.V(43), nil + case 1: + assert.EqualValues(t, 43, i) + return dyn.V(44), nil + default: + return dyn.InvalidValue, fmt.Errorf("unexpected index %d", p[0].Index()) + } })) assert.NoError(t, err) assert.Equal(t, []any{43, 44}, vout.AsAny()) @@ -185,7 +256,7 @@ func TestMapForeachOnSequenceError(t *testing.T) { // Check that an error from the map function propagates. var ref = fmt.Errorf("error") - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, ref })) assert.ErrorIs(t, err, ref) @@ -195,8 +266,123 @@ func TestMapForeachOnOtherError(t *testing.T) { vin := dyn.V(42) // Check that if foreach is applied to something other than a map or a sequence, it returns an error. - _, err := dyn.Map(vin, ".", dyn.Foreach(func(v dyn.Value) (dyn.Value, error) { + _, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { return dyn.InvalidValue, nil })) assert.ErrorContains(t, err, "expected a map or sequence, found int") } + +func TestMapForeachOnNil(t *testing.T) { + vin := dyn.NilValue + + // Check that if foreach is applied to nil, it returns nil. + vout, err := dyn.Map(vin, ".", dyn.Foreach(func(_ dyn.Path, v dyn.Value) (dyn.Value, error) { + return dyn.InvalidValue, nil + })) + assert.NoError(t, err) + assert.Equal(t, dyn.NilValue, vout) +} + +func TestMapByPatternOnNilValue(t *testing.T) { + var err error + _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyKey()), nil) + assert.ErrorContains(t, err, `expected a map at "", found nil`) + _, err = dyn.MapByPattern(dyn.NilValue, dyn.NewPattern(dyn.AnyIndex()), nil) + assert.ErrorContains(t, err, `expected a sequence at "", found nil`) +} + +func TestMapByPatternOnMap(t *testing.T) { + vin := dyn.V(map[string]dyn.Value{ + "a": dyn.V(map[string]dyn.Value{ + "b": dyn.V(42), + }), + "b": dyn.V(map[string]dyn.Value{ + "c": dyn.V(43), + }), + }) + + var err error + + // Expect an error if the pattern structure doesn't match the value structure. 
+ _, err = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Index(0)), nil) + assert.ErrorContains(t, err, `expected a sequence to index`) + + // Apply function to pattern "*.b". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Key("b")), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Key("a"), dyn.Key("b")), p) + assert.Equal(t, dyn.V(42), v) + return dyn.V(44), nil + }) + assert.NoError(t, err) + assert.Equal(t, map[string]any{ + "a": map[string]any{ + "b": 44, + }, + "b": map[string]any{ + "c": 43, + }, + }, vout.AsAny()) +} + +func TestMapByPatternOnMapWithoutMatch(t *testing.T) { + vin := dyn.V(map[string]dyn.Value{ + "a": dyn.V(map[string]dyn.Value{ + "b": dyn.V(42), + }), + "b": dyn.V(map[string]dyn.Value{ + "c": dyn.V(43), + }), + }) + + // Apply function to pattern "*.zzz". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyKey(), dyn.Key("zzz")), nil) + assert.NoError(t, err) + assert.Equal(t, vin, vout) +} + +func TestMapByPatternOnSequence(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{ + dyn.V(42), + }), + dyn.V([]dyn.Value{ + dyn.V(43), + dyn.V(44), + }), + }) + + var err error + + // Expect an error if the pattern structure doesn't match the value structure. + _, err = dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Key("a")), nil) + assert.ErrorContains(t, err, `expected a map to index`) + + // Apply function to pattern "*.c". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Index(1)), func(p dyn.Path, v dyn.Value) (dyn.Value, error) { + assert.Equal(t, dyn.NewPath(dyn.Index(1), dyn.Index(1)), p) + assert.Equal(t, dyn.V(44), v) + return dyn.V(45), nil + }) + assert.NoError(t, err) + assert.Equal(t, []any{ + []any{42}, + []any{43, 45}, + }, vout.AsAny()) +} + +func TestMapByPatternOnSequenceWithoutMatch(t *testing.T) { + vin := dyn.V([]dyn.Value{ + dyn.V([]dyn.Value{ + dyn.V(42), + }), + dyn.V([]dyn.Value{ + dyn.V(43), + dyn.V(44), + }), + }) + + // Apply function to pattern "*.zzz". + vout, err := dyn.MapByPattern(vin, dyn.NewPattern(dyn.AnyIndex(), dyn.Index(42)), nil) + assert.NoError(t, err) + assert.Equal(t, vin, vout) +} diff --git a/libs/dyn/visit_set.go b/libs/dyn/visit_set.go index fdbf41c2c..b086fb8a9 100644 --- a/libs/dyn/visit_set.go +++ b/libs/dyn/visit_set.go @@ -1,5 +1,10 @@ package dyn +import ( + "fmt" + "slices" +) + // Set assigns a new value at the specified path in the specified value. // It is identical to [SetByPath], except that it takes a string path instead of a [Path]. func Set(v Value, path string, nv Value) (Value, error) { @@ -14,11 +19,59 @@ func Set(v Value, path string, nv Value) (Value, error) { // If successful, it returns the new value with all intermediate values copied and updated. // If the path doesn't exist, it returns InvalidValue and an error. func SetByPath(v Value, p Path, nv Value) (Value, error) { - return visit(v, EmptyPath, p, visitOptions{ - fn: func(_ Value) (Value, error) { - // Return the incoming value to set it. - return nv, nil + lp := len(p) + if lp == 0 { + return nv, nil + } + + component := p[lp-1] + p = p[:lp-1] + + return visit(v, EmptyPath, NewPatternFromPath(p), visitOptions{ + fn: func(prefix Path, v Value) (Value, error) { + path := append(prefix, component) + + switch { + case component.isKey(): + // Expect a map to be set if this is a key. 
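// Note: the key itself does not have to exist yet; the Set call below inserts or overwrites it.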
+ m, ok := v.AsMap() + if !ok { + return InvalidValue, fmt.Errorf("expected a map to index %q, found %s", path, v.Kind()) + } + + // Return an updated map value. + m = m.Clone() + m.Set(V(component.key), nv) + return Value{ + v: m, + k: KindMap, + l: v.l, + }, nil + + case component.isIndex(): + // Expect a sequence to be set if this is an index. + s, ok := v.AsSequence() + if !ok { + return InvalidValue, fmt.Errorf("expected a sequence to index %q, found %s", path, v.Kind()) + } + + // Lookup current value in the sequence. + if component.index < 0 || component.index >= len(s) { + return InvalidValue, indexOutOfBoundsError{prefix} + } + + // Return an updated sequence value. + s = slices.Clone(s) + s[component.index] = nv + return Value{ + v: s, + k: KindSequence, + l: v.l, + }, nil + + default: + panic("invalid component") + } }, - allowMissingKeyInMap: true, }) } diff --git a/libs/dyn/visit_set_test.go b/libs/dyn/visit_set_test.go index b38471587..df58941e1 100644 --- a/libs/dyn/visit_set_test.go +++ b/libs/dyn/visit_set_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestSetWithEmptyPath(t *testing.T) { diff --git a/libs/dyn/walk.go b/libs/dyn/walk.go index 138816be6..c51a11e22 100644 --- a/libs/dyn/walk.go +++ b/libs/dyn/walk.go @@ -28,34 +28,36 @@ func walk(v Value, p Path, fn func(p Path, v Value) (Value, error)) (Value, erro if err == ErrSkip { return v, nil } - return NilValue, err + return InvalidValue, err } switch v.Kind() { case KindMap: m := v.MustMap() - out := make(map[string]Value, len(m)) - for k := range m { - nv, err := walk(m[k], p.Append(Key(k)), fn) + out := newMappingWithSize(m.Len()) + for _, pair := range m.Pairs() { + pk := pair.Key + pv := pair.Value + nv, err := walk(pv, append(p, Key(pk.MustString())), fn) if err == ErrDrop { continue } if err != nil { - return NilValue, err + return InvalidValue, err } - out[k] = nv + out.Set(pk, nv) } v.v = out case KindSequence: s := v.MustSequence() out := make([]Value, 0, len(s)) for i := range s { - nv, err := walk(s[i], p.Append(Index(i)), fn) + nv, err := walk(s[i], append(p, Index(i)), fn) if err == ErrDrop { continue } if err != nil { - return NilValue, err + return InvalidValue, err } out = append(out, nv) } diff --git a/libs/dyn/walk_test.go b/libs/dyn/walk_test.go index 1b94ad902..f7222b0a5 100644 --- a/libs/dyn/walk_test.go +++ b/libs/dyn/walk_test.go @@ -5,7 +5,7 @@ import ( "testing" . "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/stretchr/testify/require" ) @@ -39,7 +39,7 @@ func (w *walkCallTracker) returnSkip(path string) { } func (w *walkCallTracker) returnDrop(path string) { - w.on(path, func(v Value) Value { return NilValue }, ErrDrop) + w.on(path, func(v Value) Value { return InvalidValue }, ErrDrop) } func (w *walkCallTracker) track(p Path, v Value) (Value, error) { @@ -148,7 +148,7 @@ func TestWalkMapError(t *testing.T) { }) out, err := Walk(value, tracker.track) assert.Equal(t, cerr, err) - assert.Equal(t, NilValue, out) + assert.Equal(t, InvalidValue, out) // The callback should have been called twice. 
assert.Len(t, tracker.calls, 2) @@ -239,7 +239,7 @@ func TestWalkSequenceError(t *testing.T) { }) out, err := Walk(value, tracker.track) assert.Equal(t, cerr, err) - assert.Equal(t, NilValue, out) + assert.Equal(t, InvalidValue, out) // The callback should have been called three times. assert.Len(t, tracker.calls, 3) diff --git a/libs/dyn/yamlloader/loader.go b/libs/dyn/yamlloader/loader.go index 899e1d7b8..fbb52b504 100644 --- a/libs/dyn/yamlloader/loader.go +++ b/libs/dyn/yamlloader/loader.go @@ -55,7 +55,7 @@ func (d *loader) load(node *yaml.Node) (dyn.Value, error) { case yaml.AliasNode: value, err = d.loadAlias(node, loc) default: - return dyn.NilValue, errorf(loc, "unknown node kind: %v", node.Kind) + return dyn.InvalidValue, errorf(loc, "unknown node kind: %v", node.Kind) } if err != nil { @@ -80,26 +80,26 @@ func (d *loader) loadSequence(node *yaml.Node, loc dyn.Location) (dyn.Value, err for i, n := range node.Content { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } acc[i] = v } - return dyn.NewValue(acc, loc), nil + return dyn.NewValue(acc, []dyn.Location{loc}), nil } func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { var merge *yaml.Node - acc := make(map[string]dyn.Value) + acc := dyn.NewMapping() for i := 0; i < len(node.Content); i += 2 { key := node.Content[i] val := node.Content[i+1] // Assert that keys are strings if key.Kind != yaml.ScalarNode { - return dyn.NilValue, errorf(loc, "key is not a scalar") + return dyn.InvalidValue, errorf(loc, "key is not a scalar") } st := key.ShortTag() @@ -113,19 +113,24 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro merge = val continue default: - return dyn.NilValue, errorf(loc, "invalid key tag: %v", st) + return dyn.InvalidValue, errorf(loc, "invalid key tag: %v", st) + } + + k, err := d.load(key) + if err != nil { + return dyn.InvalidValue, err } v, err := d.load(val) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } - acc[key.Value] = v + acc.Set(k, v) } if merge == nil { - return dyn.NewValue(acc, loc), nil + return dyn.NewValue(acc, []dyn.Location{loc}), nil } // Build location for the merge node. @@ -146,11 +151,11 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Build a sequence of values to merge. // The entries that we already accumulated have precedence. - var seq []map[string]dyn.Value + var seq []dyn.Mapping for _, n := range mnodes { v, err := d.load(n) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } m, ok := v.AsMap() if !ok { @@ -161,48 +166,46 @@ func (d *loader) loadMapping(node *yaml.Node, loc dyn.Location) (dyn.Value, erro // Append the accumulated entries to the sequence. 
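// Appending the accumulated entries last means their keys win when the mappings are merged in order below (later mappings overwrite earlier ones), preserving the precedence noted above.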
seq = append(seq, acc) - out := make(map[string]dyn.Value) + out := dyn.NewMapping() for _, m := range seq { - for k, v := range m { - out[k] = v - } + out.Merge(m) } - return dyn.NewValue(out, loc), nil + return dyn.NewValue(out, []dyn.Location{loc}), nil } func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error) { st := node.ShortTag() switch st { case "!!str": - return dyn.NewValue(node.Value, loc), nil + return dyn.NewValue(node.Value, []dyn.Location{loc}), nil case "!!bool": switch strings.ToLower(node.Value) { case "true": - return dyn.NewValue(true, loc), nil + return dyn.NewValue(true, []dyn.Location{loc}), nil case "false": - return dyn.NewValue(false, loc), nil + return dyn.NewValue(false, []dyn.Location{loc}), nil default: - return dyn.NilValue, errorf(loc, "invalid bool value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid bool value: %v", node.Value) } case "!!int": i64, err := strconv.ParseInt(node.Value, 10, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid int value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid int value: %v", node.Value) } // Use regular int type instead of int64 if possible. if i64 >= math.MinInt32 && i64 <= math.MaxInt32 { - return dyn.NewValue(int(i64), loc), nil + return dyn.NewValue(int(i64), []dyn.Location{loc}), nil } - return dyn.NewValue(i64, loc), nil + return dyn.NewValue(i64, []dyn.Location{loc}), nil case "!!float": f64, err := strconv.ParseFloat(node.Value, 64) if err != nil { - return dyn.NilValue, errorf(loc, "invalid float value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid float value: %v", node.Value) } - return dyn.NewValue(f64, loc), nil + return dyn.NewValue(f64, []dyn.Location{loc}), nil case "!!null": - return dyn.NewValue(nil, loc), nil + return dyn.NewValue(nil, []dyn.Location{loc}), nil case "!!timestamp": // Try a couple of layouts for _, layout := range []string{ @@ -213,12 +216,12 @@ func (d *loader) loadScalar(node *yaml.Node, loc dyn.Location) (dyn.Value, error } { t, terr := time.Parse(layout, node.Value) if terr == nil { - return dyn.NewValue(t, loc), nil + return dyn.NewValue(t, []dyn.Location{loc}), nil } } - return dyn.NilValue, errorf(loc, "invalid timestamp value: %v", node.Value) + return dyn.InvalidValue, errorf(loc, "invalid timestamp value: %v", node.Value) default: - return dyn.NilValue, errorf(loc, "unknown tag: %v", st) + return dyn.InvalidValue, errorf(loc, "unknown tag: %v", st) } } diff --git a/libs/dyn/yamlloader/yaml.go b/libs/dyn/yamlloader/yaml.go index a18324ffa..b79b41e1e 100644 --- a/libs/dyn/yamlloader/yaml.go +++ b/libs/dyn/yamlloader/yaml.go @@ -15,7 +15,7 @@ func LoadYAML(path string, r io.Reader) (dyn.Value, error) { if err == io.EOF { return dyn.NilValue, nil } - return dyn.NilValue, err + return dyn.InvalidValue, err } return newLoader(path).load(&node) diff --git a/libs/dyn/yamlloader/yaml_anchor_test.go b/libs/dyn/yamlloader/yaml_anchor_test.go index 05beb5401..29ce69f0a 100644 --- a/libs/dyn/yamlloader/yaml_anchor_test.go +++ b/libs/dyn/yamlloader/yaml_anchor_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestYAMLAnchor01(t *testing.T) { diff --git a/libs/dyn/yamlloader/yaml_error_test.go b/libs/dyn/yamlloader/yaml_error_test.go index 11c444ad3..0ae424341 100644 --- a/libs/dyn/yamlloader/yaml_error_test.go +++ b/libs/dyn/yamlloader/yaml_error_test.go @@ -5,8 +5,8 
@@ import ( "os" "testing" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/yamlloader" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) diff --git a/libs/dyn/yamlloader/yaml_mix_test.go b/libs/dyn/yamlloader/yaml_mix_test.go index 307b93dbf..55ded6baf 100644 --- a/libs/dyn/yamlloader/yaml_mix_test.go +++ b/libs/dyn/yamlloader/yaml_mix_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestYAMLMix01(t *testing.T) { diff --git a/libs/dyn/yamlloader/yaml_test.go b/libs/dyn/yamlloader/yaml_test.go index 14269feee..9bb0377dd 100644 --- a/libs/dyn/yamlloader/yaml_test.go +++ b/libs/dyn/yamlloader/yaml_test.go @@ -6,8 +6,8 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" + assert "github.com/databricks/cli/libs/dyn/dynassert" "github.com/databricks/cli/libs/dyn/yamlloader" - "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "gopkg.in/yaml.v3" ) diff --git a/libs/dyn/yamlsaver/order_test.go b/libs/dyn/yamlsaver/order_test.go index ed2877f6c..ee9dc4752 100644 --- a/libs/dyn/yamlsaver/order_test.go +++ b/libs/dyn/yamlsaver/order_test.go @@ -3,7 +3,7 @@ package yamlsaver import ( "testing" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestOrderReturnsIncreasingIndex(t *testing.T) { diff --git a/libs/dyn/yamlsaver/saver.go b/libs/dyn/yamlsaver/saver.go index f5863ecfb..fe4cfb854 100644 --- a/libs/dyn/yamlsaver/saver.go +++ b/libs/dyn/yamlsaver/saver.go @@ -9,11 +9,24 @@ import ( "strconv" "github.com/databricks/cli/libs/dyn" - "golang.org/x/exp/maps" "gopkg.in/yaml.v3" ) -func SaveAsYAML(data any, filename string, force bool) error { +type saver struct { + nodesWithStyle map[string]yaml.Style +} + +func NewSaver() *saver { + return &saver{} +} + +func NewSaverWithStyle(nodesWithStyle map[string]yaml.Style) *saver { + return &saver{ + nodesWithStyle: nodesWithStyle, + } +} + +func (s *saver) SaveAsYAML(data any, filename string, force bool) error { err := os.MkdirAll(filepath.Dir(filename), 0755) if err != nil { return err @@ -36,15 +49,15 @@ func SaveAsYAML(data any, filename string, force bool) error { } defer file.Close() - err = encode(data, file) + err = s.encode(data, file) if err != nil { return err } return nil } -func encode(data any, w io.Writer) error { - yamlNode, err := ToYamlNode(dyn.V(data)) +func (s *saver) encode(data any, w io.Writer) error { + yamlNode, err := s.toYamlNode(dyn.V(data)) if err != nil { return err } @@ -53,23 +66,35 @@ func encode(data any, w io.Writer) error { return enc.Encode(yamlNode) } -func ToYamlNode(v dyn.Value) (*yaml.Node, error) { +func (s *saver) toYamlNode(v dyn.Value) (*yaml.Node, error) { + return s.toYamlNodeWithStyle(v, yaml.Style(0)) +} + +func (s *saver) toYamlNodeWithStyle(v dyn.Value, style yaml.Style) (*yaml.Node, error) { switch v.Kind() { case dyn.KindMap: m, _ := v.AsMap() - keys := maps.Keys(m) + // We're using location lines to define the order of keys in YAML. 
// The location is set when we convert API response struct to config.Value representation // See convert.convertMap for details - sort.SliceStable(keys, func(i, j int) bool { - return m[keys[i]].Location().Line < m[keys[j]].Location().Line + pairs := m.Pairs() + sort.SliceStable(pairs, func(i, j int) bool { + return pairs[i].Value.Location().Line < pairs[j].Value.Location().Line }) content := make([]*yaml.Node, 0) - for _, k := range keys { - item := m[k] - node := yaml.Node{Kind: yaml.ScalarNode, Value: k} - c, err := ToYamlNode(item) + for _, pair := range pairs { + pk := pair.Key + pv := pair.Value + node := yaml.Node{Kind: yaml.ScalarNode, Value: pk.MustString(), Style: style} + var nestedNodeStyle yaml.Style + if customStyle, ok := s.hasStyle(pk.MustString()); ok { + nestedNodeStyle = customStyle + } else { + nestedNodeStyle = style + } + c, err := s.toYamlNodeWithStyle(pv, nestedNodeStyle) if err != nil { return nil, err } @@ -77,40 +102,45 @@ func ToYamlNode(v dyn.Value) (*yaml.Node, error) { content = append(content, c) } - return &yaml.Node{Kind: yaml.MappingNode, Content: content}, nil + return &yaml.Node{Kind: yaml.MappingNode, Content: content, Style: style}, nil case dyn.KindSequence: - s, _ := v.AsSequence() + seq, _ := v.AsSequence() content := make([]*yaml.Node, 0) - for _, item := range s { - node, err := ToYamlNode(item) + for _, item := range seq { + node, err := s.toYamlNodeWithStyle(item, style) if err != nil { return nil, err } content = append(content, node) } - return &yaml.Node{Kind: yaml.SequenceNode, Content: content}, nil + return &yaml.Node{Kind: yaml.SequenceNode, Content: content, Style: style}, nil case dyn.KindNil: - return &yaml.Node{Kind: yaml.ScalarNode, Value: "null"}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: "null", Style: style}, nil case dyn.KindString: // If the string is a scalar value (bool, int, float and etc.), we want to quote it. if isScalarValueInString(v) { return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: yaml.DoubleQuotedStyle}, nil } - return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString()}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustString(), Style: style}, nil case dyn.KindBool: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustBool()), Style: style}, nil case dyn.KindInt: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustInt()), Style: style}, nil case dyn.KindFloat: - return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat())}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: fmt.Sprint(v.MustFloat()), Style: style}, nil case dyn.KindTime: - return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().UTC().String()}, nil + return &yaml.Node{Kind: yaml.ScalarNode, Value: v.MustTime().UTC().String(), Style: style}, nil default: // Panic because we only want to deal with known types. 
panic(fmt.Sprintf("invalid kind: %d", v.Kind())) } } +func (s *saver) hasStyle(key string) (yaml.Style, bool) { + style, ok := s.nodesWithStyle[key] + return style, ok +} + func isScalarValueInString(v dyn.Value) bool { if v.Kind() != dyn.KindString { return false diff --git a/libs/dyn/yamlsaver/saver_test.go b/libs/dyn/yamlsaver/saver_test.go index 70878d55b..387090104 100644 --- a/libs/dyn/yamlsaver/saver_test.go +++ b/libs/dyn/yamlsaver/saver_test.go @@ -5,58 +5,64 @@ import ( "time" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" "gopkg.in/yaml.v3" ) func TestMarshalNilValue(t *testing.T) { + s := NewSaver() var nilValue = dyn.NilValue - v, err := ToYamlNode(nilValue) + v, err := s.toYamlNode(nilValue) assert.NoError(t, err) assert.Equal(t, "null", v.Value) } func TestMarshalIntValue(t *testing.T) { - var intValue = dyn.NewValue(1, dyn.Location{}) - v, err := ToYamlNode(intValue) + s := NewSaver() + var intValue = dyn.V(1) + v, err := s.toYamlNode(intValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalFloatValue(t *testing.T) { - var floatValue = dyn.NewValue(1.0, dyn.Location{}) - v, err := ToYamlNode(floatValue) + s := NewSaver() + var floatValue = dyn.V(1.0) + v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalBoolValue(t *testing.T) { - var boolValue = dyn.NewValue(true, dyn.Location{}) - v, err := ToYamlNode(boolValue) + s := NewSaver() + var boolValue = dyn.V(true) + v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalTimeValue(t *testing.T) { - var timeValue = dyn.NewValue(time.Unix(0, 0), dyn.Location{}) - v, err := ToYamlNode(timeValue) + s := NewSaver() + var timeValue = dyn.V(time.Unix(0, 0)) + v, err := s.toYamlNode(timeValue) assert.NoError(t, err) assert.Equal(t, "1970-01-01 00:00:00 +0000 UTC", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalSequenceValue(t *testing.T) { + s := NewSaver() var sequenceValue = dyn.NewValue( []dyn.Value{ - dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), - dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), + dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) - v, err := ToYamlNode(sequenceValue) + v, err := s.toYamlNode(sequenceValue) assert.NoError(t, err) assert.Equal(t, yaml.SequenceNode, v.Kind) assert.Equal(t, "value1", v.Content[0].Value) @@ -64,23 +70,26 @@ func TestMarshalSequenceValue(t *testing.T) { } func TestMarshalStringValue(t *testing.T) { - var stringValue = dyn.NewValue("value", dyn.Location{}) - v, err := ToYamlNode(stringValue) + s := NewSaver() + var stringValue = dyn.V("value") + v, err := s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "value", v.Value) assert.Equal(t, yaml.ScalarNode, v.Kind) } func TestMarshalMapValue(t *testing.T) { + s := NewSaver() var mapValue = dyn.NewValue( map[string]dyn.Value{ - "key3": dyn.NewValue("value3", dyn.Location{File: "file", Line: 3, Column: 2}), - "key2": dyn.NewValue("value2", dyn.Location{File: "file", Line: 2, Column: 2}), - "key1": 
dyn.NewValue("value1", dyn.Location{File: "file", Line: 1, Column: 2}), + "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 3, Column: 2}}), + "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), + "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) - v, err := ToYamlNode(mapValue) + + v, err := s.toYamlNode(mapValue) assert.NoError(t, err) assert.Equal(t, yaml.MappingNode, v.Kind) assert.Equal(t, "key1", v.Content[0].Value) @@ -94,18 +103,19 @@ func TestMarshalMapValue(t *testing.T) { } func TestMarshalNestedValues(t *testing.T) { + s := NewSaver() var mapValue = dyn.NewValue( map[string]dyn.Value{ "key1": dyn.NewValue( map[string]dyn.Value{ - "key2": dyn.NewValue("value", dyn.Location{File: "file", Line: 1, Column: 2}), + "key2": dyn.NewValue("value", []dyn.Location{{File: "file", Line: 1, Column: 2}}), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ), }, - dyn.Location{File: "file", Line: 1, Column: 2}, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, ) - v, err := ToYamlNode(mapValue) + v, err := s.toYamlNode(mapValue) assert.NoError(t, err) assert.Equal(t, yaml.MappingNode, v.Kind) assert.Equal(t, "key1", v.Content[0].Value) @@ -115,15 +125,16 @@ func TestMarshalNestedValues(t *testing.T) { } func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { - var hexValue = dyn.NewValue(0x123, dyn.Location{}) - v, err := ToYamlNode(hexValue) + s := NewSaver() + var hexValue = dyn.V(0x123) + v, err := s.toYamlNode(hexValue) assert.NoError(t, err) assert.Equal(t, "291", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0x123", dyn.Location{}) - v, err = ToYamlNode(stringValue) + var stringValue = dyn.V("0x123") + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0x123", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -131,15 +142,16 @@ func TestMarshalHexadecimalValueIsQuoted(t *testing.T) { } func TestMarshalBinaryValueIsQuoted(t *testing.T) { - var binaryValue = dyn.NewValue(0b101, dyn.Location{}) - v, err := ToYamlNode(binaryValue) + s := NewSaver() + var binaryValue = dyn.V(0b101) + v, err := s.toYamlNode(binaryValue) assert.NoError(t, err) assert.Equal(t, "5", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0b101", dyn.Location{}) - v, err = ToYamlNode(stringValue) + var stringValue = dyn.V("0b101") + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0b101", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -147,15 +159,16 @@ func TestMarshalBinaryValueIsQuoted(t *testing.T) { } func TestMarshalOctalValueIsQuoted(t *testing.T) { - var octalValue = dyn.NewValue(0123, dyn.Location{}) - v, err := ToYamlNode(octalValue) + s := NewSaver() + var octalValue = dyn.V(0123) + v, err := s.toYamlNode(octalValue) assert.NoError(t, err) assert.Equal(t, "83", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("0123", dyn.Location{}) - v, err = ToYamlNode(stringValue) + var stringValue = dyn.V("0123") + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "0123", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -163,15 +176,16 @@ func 
TestMarshalOctalValueIsQuoted(t *testing.T) { } func TestMarshalFloatValueIsQuoted(t *testing.T) { - var floatValue = dyn.NewValue(1.0, dyn.Location{}) - v, err := ToYamlNode(floatValue) + s := NewSaver() + var floatValue = dyn.V(1.0) + v, err := s.toYamlNode(floatValue) assert.NoError(t, err) assert.Equal(t, "1", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("1.0", dyn.Location{}) - v, err = ToYamlNode(stringValue) + var stringValue = dyn.V("1.0") + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "1.0", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) @@ -179,17 +193,79 @@ func TestMarshalFloatValueIsQuoted(t *testing.T) { } func TestMarshalBoolValueIsQuoted(t *testing.T) { - var boolValue = dyn.NewValue(true, dyn.Location{}) - v, err := ToYamlNode(boolValue) + s := NewSaver() + var boolValue = dyn.V(true) + v, err := s.toYamlNode(boolValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.Style(0), v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) - var stringValue = dyn.NewValue("true", dyn.Location{}) - v, err = ToYamlNode(stringValue) + var stringValue = dyn.V("true") + v, err = s.toYamlNode(stringValue) assert.NoError(t, err) assert.Equal(t, "true", v.Value) assert.Equal(t, yaml.DoubleQuotedStyle, v.Style) assert.Equal(t, yaml.ScalarNode, v.Kind) } + +func TestCustomStylingWithNestedMap(t *testing.T) { + s := NewSaverWithStyle(map[string]yaml.Style{ + "styled": yaml.DoubleQuotedStyle, + }) + + var styledMap = dyn.NewValue( + map[string]dyn.Value{ + "key1": dyn.NewValue("value1", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + "key2": dyn.NewValue("value2", []dyn.Location{{File: "file", Line: 2, Column: 2}}), + }, + []dyn.Location{{File: "file", Line: -2, Column: 2}}, + ) + + var unstyledMap = dyn.NewValue( + map[string]dyn.Value{ + "key3": dyn.NewValue("value3", []dyn.Location{{File: "file", Line: 1, Column: 2}}), + "key4": dyn.NewValue("value4", []dyn.Location{{File: "file", Line: 2, Column: 2}}), + }, + []dyn.Location{{File: "file", Line: -1, Column: 2}}, + ) + + var val = dyn.NewValue( + map[string]dyn.Value{ + "styled": styledMap, + "unstyled": unstyledMap, + }, + []dyn.Location{{File: "file", Line: 1, Column: 2}}, + ) + + mv, err := s.toYamlNode(val) + assert.NoError(t, err) + + // Check that the styled map is quoted + v := mv.Content[1] + + assert.Equal(t, yaml.MappingNode, v.Kind) + assert.Equal(t, "key1", v.Content[0].Value) + assert.Equal(t, "value1", v.Content[1].Value) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[0].Style) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[1].Style) + + assert.Equal(t, "key2", v.Content[2].Value) + assert.Equal(t, "value2", v.Content[3].Value) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[2].Style) + assert.Equal(t, yaml.DoubleQuotedStyle, v.Content[3].Style) + + // Check that the unstyled map is not quoted + v = mv.Content[3] + + assert.Equal(t, yaml.MappingNode, v.Kind) + assert.Equal(t, "key3", v.Content[0].Value) + assert.Equal(t, "value3", v.Content[1].Value) + assert.Equal(t, yaml.Style(0), v.Content[0].Style) + assert.Equal(t, yaml.Style(0), v.Content[1].Style) + + assert.Equal(t, "key4", v.Content[2].Value) + assert.Equal(t, "value4", v.Content[3].Value) + assert.Equal(t, yaml.Style(0), v.Content[2].Style) + assert.Equal(t, yaml.Style(0), v.Content[3].Style) +} diff --git a/libs/dyn/yamlsaver/utils.go b/libs/dyn/yamlsaver/utils.go index 0fb4064b5..a162bf31f 100644 --- 
a/libs/dyn/yamlsaver/utils.go +++ b/libs/dyn/yamlsaver/utils.go @@ -15,7 +15,7 @@ func ConvertToMapValue(strct any, order *Order, skipFields []string, dst map[str ref := dyn.NilValue mv, err := convert.FromTyped(strct, ref) if err != nil { - return dyn.NilValue, err + return dyn.InvalidValue, err } if mv.Kind() != dyn.KindMap { @@ -26,7 +26,9 @@ func ConvertToMapValue(strct any, order *Order, skipFields []string, dst map[str } func skipAndOrder(mv dyn.Value, order *Order, skipFields []string, dst map[string]dyn.Value) (dyn.Value, error) { - for k, v := range mv.MustMap() { + for _, pair := range mv.MustMap().Pairs() { + k := pair.Key.MustString() + v := pair.Value if v.Kind() == dyn.KindNil { continue } @@ -42,7 +44,7 @@ func skipAndOrder(mv dyn.Value, order *Order, skipFields []string, dst map[strin continue } - dst[k] = dyn.NewValue(v.Value(), dyn.Location{Line: order.Get(k)}) + dst[k] = dyn.NewValue(v.Value(), []dyn.Location{{Line: order.Get(k)}}) } return dyn.V(dst), nil diff --git a/libs/dyn/yamlsaver/utils_test.go b/libs/dyn/yamlsaver/utils_test.go index 32c9143be..1afab601a 100644 --- a/libs/dyn/yamlsaver/utils_test.go +++ b/libs/dyn/yamlsaver/utils_test.go @@ -4,7 +4,7 @@ import ( "testing" "github.com/databricks/cli/libs/dyn" - "github.com/stretchr/testify/assert" + assert "github.com/databricks/cli/libs/dyn/dynassert" ) func TestConvertToMapValueWithOrder(t *testing.T) { @@ -32,17 +32,26 @@ func TestConvertToMapValueWithOrder(t *testing.T) { result, err := ConvertToMapValue(v, NewOrder([]string{"list", "name", "map"}), []string{"format"}, map[string]dyn.Value{}) assert.NoError(t, err) - assert.Equal(t, map[string]dyn.Value{ - "list": dyn.NewValue([]dyn.Value{ - dyn.V("a"), - dyn.V("b"), - dyn.V("c"), - }, dyn.Location{Line: -3}), - "name": dyn.NewValue("test", dyn.Location{Line: -2}), - "map": dyn.NewValue(map[string]dyn.Value{ - "key1": dyn.V("value1"), - "key2": dyn.V("value2"), - }, dyn.Location{Line: -1}), - "long_name_field": dyn.NewValue("long name goes here", dyn.Location{Line: 1}), - }, result.MustMap()) + assert.Equal(t, dyn.V(map[string]dyn.Value{ + "list": dyn.NewValue( + []dyn.Value{ + dyn.V("a"), + dyn.V("b"), + dyn.V("c"), + }, + []dyn.Location{{Line: -3}}, + ), + "name": dyn.NewValue( + "test", + []dyn.Location{{Line: -2}}, + ), + "map": dyn.NewValue( + map[string]dyn.Value{ + "key1": dyn.V("value1"), + "key2": dyn.V("value2"), + }, + []dyn.Location{{Line: -1}}, + ), + "long_name_field": dyn.NewValue("long name goes here", []dyn.Location{{Line: 1}}), + }), result) } diff --git a/libs/exec/exec.go b/libs/exec/exec.go index 9767c199a..8e4633271 100644 --- a/libs/exec/exec.go +++ b/libs/exec/exec.go @@ -90,18 +90,25 @@ func NewCommandExecutorWithExecutable(dir string, execType ExecutableType) (*Exe }, nil } -func (e *Executor) StartCommand(ctx context.Context, command string) (Command, error) { +func (e *Executor) prepareCommand(ctx context.Context, command string) (*osexec.Cmd, *execContext, error) { ec, err := e.shell.prepare(command) + if err != nil { + return nil, nil, err + } + cmd := osexec.CommandContext(ctx, ec.executable, ec.args...) + cmd.Dir = e.dir + return cmd, ec, nil +} + +func (e *Executor) StartCommand(ctx context.Context, command string) (Command, error) { + cmd, ec, err := e.prepareCommand(ctx, command) if err != nil { return nil, err } - return e.start(ctx, ec) + return e.start(ctx, cmd, ec) } -func (e *Executor) start(ctx context.Context, ec *execContext) (Command, error) { - cmd := osexec.CommandContext(ctx, ec.executable, ec.args...) 
- cmd.Dir = e.dir - +func (e *Executor) start(ctx context.Context, cmd *osexec.Cmd, ec *execContext) (Command, error) { stdout, err := cmd.StdoutPipe() if err != nil { return nil, err @@ -116,17 +123,12 @@ func (e *Executor) start(ctx context.Context, ec *execContext) (Command, error) } func (e *Executor) Exec(ctx context.Context, command string) ([]byte, error) { - cmd, err := e.StartCommand(ctx, command) + cmd, ec, err := e.prepareCommand(ctx, command) if err != nil { return nil, err } - - res, err := io.ReadAll(io.MultiReader(cmd.Stdout(), cmd.Stderr())) - if err != nil { - return nil, err - } - - return res, cmd.Wait() + defer os.Remove(ec.scriptFile) + return cmd.CombinedOutput() } func (e *Executor) ShellType() ExecutableType { diff --git a/libs/exec/exec_test.go b/libs/exec/exec_test.go index 0730638e3..ad54601d0 100644 --- a/libs/exec/exec_test.go +++ b/libs/exec/exec_test.go @@ -32,6 +32,15 @@ func TestExecutorWithComplexInput(t *testing.T) { assert.Equal(t, "Hello\nWorld\n", string(out)) } +func TestExecutorWithStderr(t *testing.T) { + executor, err := NewCommandExecutor(".") + assert.NoError(t, err) + out, err := executor.Exec(context.Background(), "echo 'Hello' && >&2 echo 'Error'") + assert.NoError(t, err) + assert.NotNil(t, out) + assert.Equal(t, "Hello\nError\n", string(out)) +} + func TestExecutorWithInvalidCommand(t *testing.T) { executor, err := NewCommandExecutor(".") assert.NoError(t, err) @@ -108,16 +117,16 @@ func TestExecutorCleanupsTempFiles(t *testing.T) { executor, err := NewCommandExecutor(".") assert.NoError(t, err) - ec, err := executor.shell.prepare("echo 'Hello'") + cmd, ec, err := executor.prepareCommand(context.Background(), "echo 'Hello'") assert.NoError(t, err) - cmd, err := executor.start(context.Background(), ec) + command, err := executor.start(context.Background(), cmd, ec) assert.NoError(t, err) fileName := ec.args[1] assert.FileExists(t, fileName) - err = cmd.Wait() + err = command.Wait() assert.NoError(t, err) assert.NoFileExists(t, fileName) } diff --git a/libs/exec/shell_bash.go b/libs/exec/shell_bash.go index bb8c6c514..9f6b508f4 100644 --- a/libs/exec/shell_bash.go +++ b/libs/exec/shell_bash.go @@ -34,8 +34,12 @@ func newBashShell() (shell, error) { return nil, nil } + // Convert to lowercase for case-insensitive comparison + // Some systems may return some parts of the path in uppercase. + outLower := strings.ToLower(out) // Skipping WSL bash if found one - if strings.Contains(out, `\Windows\System32\bash.exe`) || strings.Contains(out, `\Microsoft\WindowsApps\bash.exe`) { + if strings.Contains(outLower, `\windows\system32\bash.exe`) || + strings.Contains(outLower, `\microsoft\windowsapps\bash.exe`) { return nil, nil } diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go index 17884d573..9fc68bd56 100644 --- a/libs/filer/files_client.go +++ b/libs/filer/files_client.go @@ -11,18 +11,30 @@ import ( "net/url" "path" "slices" + "sort" "strings" "time" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/service/files" + "golang.org/x/sync/errgroup" ) +// As of 19th Feb 2024, the Files API backend has a rate limit of 10 concurrent +// requests and 100 QPS. We limit the number of concurrent requests to 5 to +// avoid hitting the rate limit. +const maxFilesRequestsInFlight = 5 + // Type that implements fs.FileInfo for the Files API. 
+// This is required for the filer.Stat() method. type filesApiFileInfo struct { - absPath string - isDir bool + absPath string + isDir bool + fileSize int64 + lastModified int64 } func (info filesApiFileInfo) Name() string { @@ -30,8 +42,7 @@ func (info filesApiFileInfo) Name() string { } func (info filesApiFileInfo) Size() int64 { - // No way to get the file size in the Files API. - return 0 + return info.fileSize } func (info filesApiFileInfo) Mode() fs.FileMode { @@ -43,7 +54,7 @@ func (info filesApiFileInfo) Mode() fs.FileMode { } func (info filesApiFileInfo) ModTime() time.Time { - return time.Time{} + return time.UnixMilli(info.lastModified) } func (info filesApiFileInfo) IsDir() bool { @@ -54,6 +65,28 @@ func (info filesApiFileInfo) Sys() any { return nil } +// Type that implements fs.DirEntry for the Files API. +// This is required for the filer.ReadDir() method. +type filesApiDirEntry struct { + i filesApiFileInfo +} + +func (e filesApiDirEntry) Name() string { + return e.i.Name() +} + +func (e filesApiDirEntry) IsDir() bool { + return e.i.IsDir() +} + +func (e filesApiDirEntry) Type() fs.FileMode { + return e.i.Mode() +} + +func (e filesApiDirEntry) Info() (fs.FileInfo, error) { + return e.i, nil +} + // FilesClient implements the [Filer] interface for the Files API backend. type FilesClient struct { workspaceClient *databricks.WorkspaceClient @@ -63,10 +96,6 @@ type FilesClient struct { root WorkspaceRootPath } -func filesNotImplementedError(fn string) error { - return fmt.Errorf("filer.%s is not implemented for the Files API", fn) -} - func NewFilesClient(w *databricks.WorkspaceClient, root string) (Filer, error) { apiClient, err := client.New(w.Config) if err != nil { @@ -102,6 +131,24 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader, return err } + // Check that target path exists if CreateParentDirectories mode is not set + if !slices.Contains(mode, CreateParentDirectories) { + err := w.workspaceClient.Files.GetDirectoryMetadataByDirectoryPath(ctx, path.Dir(absPath)) + if err != nil { + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return err + } + + // This API returns a 404 if the file doesn't exist. + if aerr.StatusCode == http.StatusNotFound { + return NoSuchDirectoryError{path.Dir(absPath)} + } + + return err + } + } + overwrite := slices.Contains(mode, OverwriteIfExists) urlPath = fmt.Sprintf("%s?overwrite=%t", urlPath, overwrite) headers := map[string]string{"Content-Type": "application/octet-stream"} @@ -119,7 +166,7 @@ func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader, } // This API returns 409 if the file already exists, when the object type is file - if aerr.StatusCode == http.StatusConflict { + if aerr.StatusCode == http.StatusConflict && aerr.ErrorCode == "ALREADY_EXISTS" { return FileAlreadyExistsError{absPath} } @@ -148,14 +195,20 @@ func (w *FilesClient) Read(ctx context.Context, name string) (io.ReadCloser, err // This API returns a 404 if the specified path does not exist. if aerr.StatusCode == http.StatusNotFound { + // Check if the path is a directory. If so, return not a file error. + if _, err := w.statDir(ctx, name); err == nil { + return nil, NotAFile{absPath} + } + + // No file or directory exists at the specified path. Return no such file error. 
return nil, FileDoesNotExistError{absPath} } return nil, err } -func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { - absPath, urlPath, err := w.urlPath(name) +func (w *FilesClient) deleteFile(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) if err != nil { return err } @@ -165,53 +218,232 @@ func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMod return CannotDeleteRootError{} } - err = w.apiClient.Do(ctx, http.MethodDelete, urlPath, nil, nil, nil) + err = w.workspaceClient.Files.DeleteByFilePath(ctx, absPath) // Return early on success. if err == nil { return nil } - // Special handling of this error only if it is an API error. var aerr *apierr.APIError + // Special handling of this error only if it is an API error. if !errors.As(err, &aerr) { return err } - // This API returns a 404 if the specified path does not exist. + // This files delete API returns a 404 if the specified path does not exist. if aerr.StatusCode == http.StatusNotFound { return FileDoesNotExistError{absPath} } - // This API returns 409 if the underlying path is a directory. - if aerr.StatusCode == http.StatusConflict { + return err +} + +func (w *FilesClient) deleteDirectory(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) + if err != nil { + return err + } + + // Illegal to delete the root path. + if absPath == w.root.rootPath { + return CannotDeleteRootError{} + } + + err = w.workspaceClient.Files.DeleteDirectoryByDirectoryPath(ctx, absPath) + + var aerr *apierr.APIError + // Special handling of this error only if it is an API error. + if !errors.As(err, &aerr) { + return err + } + + // The directory delete API returns a 400 if the directory is not empty + if aerr.StatusCode == http.StatusBadRequest { + reasons := []string{} + for _, detail := range aerr.Details { + reasons = append(reasons, detail.Reason) + } + // Error code 400 is generic and can be returned for other reasons. Make + // sure one of the reasons for the error is that the directory is not empty. + if !slices.Contains(reasons, "FILES_API_DIRECTORY_IS_NOT_EMPTY") { + return err + } return DirectoryNotEmptyError{absPath} } return err } -func (w *FilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { - return nil, filesNotImplementedError("ReadDir") -} +func (w *FilesClient) recursiveDelete(ctx context.Context, name string) error { + filerFS := NewFS(ctx, w) + dirsToDelete := make([]string, 0) + filesToDelete := make([]string, 0) + callback := func(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } -func (w *FilesClient) Mkdir(ctx context.Context, name string) error { - // Directories are created implicitly. - // No need to do anything. + // Files API does not allowing deleting non-empty directories. We instead + // collect the directories to delete and delete them once all the files have + // been deleted. + if d.IsDir() { + dirsToDelete = append(dirsToDelete, path) + return nil + } + + filesToDelete = append(filesToDelete, path) + return nil + } + + // Walk the directory and accumulate the files and directories to delete. + err := fs.WalkDir(filerFS, name, callback) + if err != nil { + return err + } + + // Delete the files in parallel. + group, groupCtx := errgroup.WithContext(ctx) + group.SetLimit(maxFilesRequestsInFlight) + + for _, file := range filesToDelete { + file := file + + // Skip the file if the context has already been cancelled. 
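+ // The group context is cancelled as soon as any of the delete goroutines returns an error.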
+ select { + case <-groupCtx.Done(): + continue + default: + // Proceed. + } + + group.Go(func() error { + return w.deleteFile(groupCtx, file) + }) + } + + // Wait for the files to be deleted and return the first non-nil error. + err = group.Wait() + if err != nil { + return err + } + + // Delete the directories in reverse order to ensure that the parent + // directories are deleted after the children. This is possible because + // fs.WalkDir walks the directories in lexicographical order. + for i := len(dirsToDelete) - 1; i >= 0; i-- { + err := w.deleteDirectory(ctx, dirsToDelete[i]) + if err != nil { + return err + } + } return nil } -func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { - absPath, urlPath, err := w.urlPath(name) +func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { + if slices.Contains(mode, DeleteRecursively) { + return w.recursiveDelete(ctx, name) + } + + // Issue a stat call to determine if the path is a file or directory. + info, err := w.Stat(ctx, name) + if err != nil { + return err + } + + // Issue the delete call for a directory + if info.IsDir() { + return w.deleteDirectory(ctx, name) + } + + return w.deleteFile(ctx, name) +} + +func (w *FilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + absPath, err := w.root.Join(name) if err != nil { return nil, err } - err = w.apiClient.Do(ctx, http.MethodHead, urlPath, nil, nil, nil) + iter := w.workspaceClient.Files.ListDirectoryContents(ctx, files.ListDirectoryContentsRequest{ + DirectoryPath: absPath, + }) + + files, err := listing.ToSlice(ctx, iter) + + // Return early on success. + if err == nil { + entries := make([]fs.DirEntry, len(files)) + for i, file := range files { + entries[i] = filesApiDirEntry{ + i: filesApiFileInfo{ + absPath: file.Path, + isDir: file.IsDirectory, + fileSize: file.FileSize, + lastModified: file.LastModified, + }, + } + } + + // Sort by name for parity with os.ReadDir. + sort.Slice(entries, func(i, j int) bool { return entries[i].Name() < entries[j].Name() }) + return entries, nil + } + + // Special handling of this error only if it is an API error. + var apierr *apierr.APIError + if !errors.As(err, &apierr) { + return nil, err + } + + // This API returns a 404 if the specified path does not exist. + if apierr.StatusCode == http.StatusNotFound { + // Check if the path is a file. If so, return not a directory error. + if _, err := w.statFile(ctx, name); err == nil { + return nil, NotADirectory{absPath} + } + + // No file or directory exists at the specified path. Return no such directory error. + return nil, NoSuchDirectoryError{absPath} + } + return nil, err +} + +func (w *FilesClient) Mkdir(ctx context.Context, name string) error { + absPath, err := w.root.Join(name) + if err != nil { + return err + } + + err = w.workspaceClient.Files.CreateDirectory(ctx, files.CreateDirectoryRequest{ + DirectoryPath: absPath, + }) + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.StatusCode == http.StatusConflict { + return FileAlreadyExistsError{absPath} + } + + return err +} + +// Get file metadata for a file using the Files API. +func (w *FilesClient) statFile(ctx context.Context, name string) (fs.FileInfo, error) { + absPath, err := w.root.Join(name) + if err != nil { + return nil, err + } + + fileInfo, err := w.workspaceClient.Files.GetMetadataByFilePath(ctx, absPath) // If the HEAD requests succeeds, the file exists. 
if err == nil { - return filesApiFileInfo{absPath: absPath, isDir: false}, nil + return filesApiFileInfo{ + absPath: absPath, + isDir: false, + fileSize: fileInfo.ContentLength, + }, nil } // Special handling of this error only if it is an API error. @@ -225,10 +457,51 @@ func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error return nil, FileDoesNotExistError{absPath} } - // This API returns 409 if the underlying path is a directory. - if aerr.StatusCode == http.StatusConflict { + return nil, err +} + +// Get file metadata for a directory using the Files API. +func (w *FilesClient) statDir(ctx context.Context, name string) (fs.FileInfo, error) { + absPath, err := w.root.Join(name) + if err != nil { + return nil, err + } + + err = w.workspaceClient.Files.GetDirectoryMetadataByDirectoryPath(ctx, absPath) + + // If the HEAD requests succeeds, the directory exists. + if err == nil { return filesApiFileInfo{absPath: absPath, isDir: true}, nil } + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return nil, err + } + + // The directory metadata API returns a 404 if the specified path does not exist. + if aerr.StatusCode == http.StatusNotFound { + return nil, NoSuchDirectoryError{absPath} + } + return nil, err } + +func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + // Assume that the path is a directory and issue a stat call. + dirInfo, err := w.statDir(ctx, name) + + // If the file exists, return early. + if err == nil { + return dirInfo, nil + } + + // Return early if the error is not a NoSuchDirectoryError. + if !errors.As(err, &NoSuchDirectoryError{}) { + return nil, err + } + + // Since the path is not a directory, assume that it is a file and issue a stat call. + return w.statFile(ctx, name) +} diff --git a/libs/filer/local_client.go b/libs/filer/local_client.go index 958b6277d..48e8a05ee 100644 --- a/libs/filer/local_client.go +++ b/libs/filer/local_client.go @@ -2,6 +2,7 @@ package filer import ( "context" + "errors" "io" "io/fs" "os" @@ -34,9 +35,8 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, flags |= os.O_EXCL } - absPath = filepath.FromSlash(absPath) f, err := os.OpenFile(absPath, flags, 0644) - if os.IsNotExist(err) && slices.Contains(mode, CreateParentDirectories) { + if errors.Is(err, fs.ErrNotExist) && slices.Contains(mode, CreateParentDirectories) { // Create parent directories if they don't exist. err = os.MkdirAll(filepath.Dir(absPath), 0755) if err != nil { @@ -48,9 +48,9 @@ func (w *LocalClient) Write(ctx context.Context, name string, reader io.Reader, if err != nil { switch { - case os.IsNotExist(err): + case errors.Is(err, fs.ErrNotExist): return NoSuchDirectoryError{path: absPath} - case os.IsExist(err): + case errors.Is(err, fs.ErrExist): return FileAlreadyExistsError{path: absPath} default: return err @@ -76,10 +76,9 @@ func (w *LocalClient) Read(ctx context.Context, name string) (io.ReadCloser, err // This stat call serves two purposes: // 1. Checks file at path exists, and throws an error if it does not // 2. 
Allows us to error out if the path is a directory - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return nil, err @@ -103,7 +102,6 @@ func (w *LocalClient) Delete(ctx context.Context, name string, mode ...DeleteMod return CannotDeleteRootError{} } - absPath = filepath.FromSlash(absPath) err = os.Remove(absPath) // Return early on success. @@ -111,11 +109,11 @@ func (w *LocalClient) Delete(ctx context.Context, name string, mode ...DeleteMod return nil } - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return FileDoesNotExistError{path: absPath} } - if os.IsExist(err) { + if errors.Is(err, fs.ErrExist) { if slices.Contains(mode, DeleteRecursively) { return os.RemoveAll(absPath) } @@ -131,10 +129,9 @@ func (w *LocalClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, return nil, err } - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, NoSuchDirectoryError{path: absPath} } return nil, err @@ -153,7 +150,6 @@ func (w *LocalClient) Mkdir(ctx context.Context, name string) error { return err } - dirPath = filepath.FromSlash(dirPath) return os.MkdirAll(dirPath, 0755) } @@ -163,9 +159,8 @@ func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error return nil, err } - absPath = filepath.FromSlash(absPath) stat, err := os.Stat(absPath) - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil, FileDoesNotExistError{path: absPath} } return stat, err diff --git a/libs/filer/local_root_path.go b/libs/filer/local_root_path.go index 15a542631..3f8843093 100644 --- a/libs/filer/local_root_path.go +++ b/libs/filer/local_root_path.go @@ -19,7 +19,6 @@ func NewLocalRootPath(root string) localRootPath { func (rp *localRootPath) Join(name string) (string, error) { absPath := filepath.Join(rp.rootPath, name) - if !strings.HasPrefix(absPath, rp.rootPath) { return "", fmt.Errorf("relative path escapes root: %s", name) } diff --git a/libs/filer/workspace_files_client.go b/libs/filer/workspace_files_client.go index 41e35d9d1..d799c1f88 100644 --- a/libs/filer/workspace_files_client.go +++ b/libs/filer/workspace_files_client.go @@ -19,6 +19,7 @@ import ( "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/marshal" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -35,22 +36,36 @@ func (entry wsfsDirEntry) Info() (fs.FileInfo, error) { return entry.wsfsFileInfo, nil } +func wsfsDirEntriesFromObjectInfos(objects []workspace.ObjectInfo) []fs.DirEntry { + info := make([]fs.DirEntry, len(objects)) + for i, v := range objects { + info[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: v}} + } + + // Sort by name for parity with os.ReadDir. + sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() }) + return info +} + // Type that implements fs.FileInfo for WSFS. type wsfsFileInfo struct { - oi workspace.ObjectInfo + workspace.ObjectInfo + + // The export format of a notebook. This is not exposed by the SDK. 
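+ // It is populated by Stat(), which requests return_export_info from the workspace get-status API.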
+ ReposExportFormat workspace.ExportFormat `json:"repos_export_format,omitempty"` } func (info wsfsFileInfo) Name() string { - return path.Base(info.oi.Path) + return path.Base(info.ObjectInfo.Path) } func (info wsfsFileInfo) Size() int64 { - return info.oi.Size + return info.ObjectInfo.Size } func (info wsfsFileInfo) Mode() fs.FileMode { - switch info.oi.ObjectType { - case workspace.ObjectTypeDirectory: + switch info.ObjectInfo.ObjectType { + case workspace.ObjectTypeDirectory, workspace.ObjectTypeRepo: return fs.ModeDir default: return fs.ModePerm @@ -58,15 +73,29 @@ func (info wsfsFileInfo) Mode() fs.FileMode { } func (info wsfsFileInfo) ModTime() time.Time { - return time.UnixMilli(info.oi.ModifiedAt) + return time.UnixMilli(info.ObjectInfo.ModifiedAt) } func (info wsfsFileInfo) IsDir() bool { - return info.oi.ObjectType == workspace.ObjectTypeDirectory + return info.Mode() == fs.ModeDir } func (info wsfsFileInfo) Sys() any { - return info.oi + return info.ObjectInfo +} + +// UnmarshalJSON is a custom unmarshaller for the wsfsFileInfo struct. +// It must be defined for this type because otherwise the implementation +// of the embedded ObjectInfo type will be used. +func (info *wsfsFileInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, info) +} + +// MarshalJSON is a custom marshaller for the wsfsFileInfo struct. +// It must be defined for this type because otherwise the implementation +// of the embedded ObjectInfo type will be used. +func (info *wsfsFileInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(info) } // WorkspaceFilesClient implements the files-in-workspace API. @@ -262,14 +291,8 @@ func (w *WorkspaceFilesClient) ReadDir(ctx context.Context, name string) ([]fs.D return nil, err } - info := make([]fs.DirEntry, len(objects)) - for i, v := range objects { - info[i] = wsfsDirEntry{wsfsFileInfo{oi: v}} - } - - // Sort by name for parity with os.ReadDir. - sort.Slice(info, func(i, j int) bool { return info[i].Name() < info[j].Name() }) - return info, nil + // Convert to fs.DirEntry. + return wsfsDirEntriesFromObjectInfos(objects), nil } func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error { @@ -288,7 +311,22 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn return nil, err } - info, err := w.workspaceClient.Workspace.GetStatusByPath(ctx, absPath) + var stat wsfsFileInfo + + // Perform bespoke API call because "return_export_info" is not exposed by the SDK. + // We need "repos_export_format" to determine if the file is a py or a ipynb notebook. + // This is not exposed by the SDK so we need to make a direct API call. + err = w.apiClient.Do( + ctx, + http.MethodGet, + "/api/2.0/workspace/get-status", + nil, + map[string]string{ + "path": absPath, + "return_export_info": "true", + }, + &stat, + ) if err != nil { // If we got an API error we deal with it below. 
var aerr *apierr.APIError @@ -302,5 +340,5 @@ func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileIn } } - return wsfsFileInfo{*info}, nil + return stat, nil } diff --git a/libs/filer/workspace_files_client_test.go b/libs/filer/workspace_files_client_test.go new file mode 100644 index 000000000..650b5be68 --- /dev/null +++ b/libs/filer/workspace_files_client_test.go @@ -0,0 +1,95 @@ +package filer + +import ( + "encoding/json" + "io/fs" + "testing" + "time" + + "github.com/databricks/databricks-sdk-go/service/workspace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestWorkspaceFilesDirEntry(t *testing.T) { + entries := wsfsDirEntriesFromObjectInfos([]workspace.ObjectInfo{ + { + Path: "/dir", + ObjectType: workspace.ObjectTypeDirectory, + }, + { + Path: "/file", + ObjectType: workspace.ObjectTypeFile, + Size: 42, + }, + { + Path: "/repo", + ObjectType: workspace.ObjectTypeRepo, + }, + }) + + // Confirm the path is passed through correctly. + assert.Equal(t, "dir", entries[0].Name()) + assert.Equal(t, "file", entries[1].Name()) + assert.Equal(t, "repo", entries[2].Name()) + + // Confirm the type is passed through correctly. + assert.Equal(t, fs.ModeDir, entries[0].Type()) + assert.Equal(t, fs.ModePerm, entries[1].Type()) + assert.Equal(t, fs.ModeDir, entries[2].Type()) + + // Get [fs.FileInfo] from directory entry. + i0, err := entries[0].Info() + require.NoError(t, err) + i1, err := entries[1].Info() + require.NoError(t, err) + i2, err := entries[2].Info() + require.NoError(t, err) + + // Confirm size. + assert.Equal(t, int64(0), i0.Size()) + assert.Equal(t, int64(42), i1.Size()) + assert.Equal(t, int64(0), i2.Size()) + + // Confirm IsDir. + assert.True(t, i0.IsDir()) + assert.False(t, i1.IsDir()) + assert.True(t, i2.IsDir()) +} + +func TestWorkspaceFilesClient_wsfsUnmarshal(t *testing.T) { + payload := ` + { + "created_at": 1671030805916, + "language": "PYTHON", + "modified_at": 1671032235392, + "object_id": 795822750063438, + "object_type": "NOTEBOOK", + "path": "/some/path/to/a/notebook", + "repos_export_format": "SOURCE", + "resource_id": "795822750063438" + } + ` + + var info wsfsFileInfo + err := json.Unmarshal([]byte(payload), &info) + require.NoError(t, err) + + // Fields in the object info. + assert.Equal(t, int64(1671030805916), info.CreatedAt) + assert.Equal(t, workspace.LanguagePython, info.Language) + assert.Equal(t, int64(1671032235392), info.ModifiedAt) + assert.Equal(t, int64(795822750063438), info.ObjectId) + assert.Equal(t, workspace.ObjectTypeNotebook, info.ObjectType) + assert.Equal(t, "/some/path/to/a/notebook", info.Path) + assert.Equal(t, workspace.ExportFormatSource, info.ReposExportFormat) + assert.Equal(t, "795822750063438", info.ResourceId) + + // Functions for fs.FileInfo. 
+ assert.Equal(t, "notebook", info.Name()) + assert.Equal(t, int64(0), info.Size()) + assert.Equal(t, fs.ModePerm, info.Mode()) + assert.Equal(t, time.UnixMilli(1671032235392), info.ModTime()) + assert.False(t, info.IsDir()) + assert.NotNil(t, info.Sys()) +} diff --git a/libs/filer/workspace_files_extensions_client.go b/libs/filer/workspace_files_extensions_client.go new file mode 100644 index 000000000..a872dcc65 --- /dev/null +++ b/libs/filer/workspace_files_extensions_client.go @@ -0,0 +1,296 @@ +package filer + +import ( + "context" + "errors" + "fmt" + "io" + "io/fs" + "path" + "strings" + + "github.com/databricks/cli/libs/log" + "github.com/databricks/cli/libs/notebook" + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type workspaceFilesExtensionsClient struct { + workspaceClient *databricks.WorkspaceClient + + wsfs Filer + root string +} + +var extensionsToLanguages = map[string]workspace.Language{ + ".py": workspace.LanguagePython, + ".r": workspace.LanguageR, + ".scala": workspace.LanguageScala, + ".sql": workspace.LanguageSql, + ".ipynb": workspace.LanguagePython, +} + +type workspaceFileStatus struct { + wsfsFileInfo + + // Name of the file to be used in any API calls made using the workspace files + // filer. For notebooks this path does not include the extension. + nameForWorkspaceAPI string +} + +func (w *workspaceFilesExtensionsClient) stat(ctx context.Context, name string) (wsfsFileInfo, error) { + info, err := w.wsfs.Stat(ctx, name) + if err != nil { + return wsfsFileInfo{}, err + } + return info.(wsfsFileInfo), err +} + +// This function returns the stat for the provided notebook. The stat object itself contains the path +// with the extension since it is meant to be used in the context of a fs.FileInfo. +func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithExt(ctx context.Context, name string) (*workspaceFileStatus, error) { + ext := path.Ext(name) + nameWithoutExt := strings.TrimSuffix(name, ext) + + // File name does not have an extension associated with Databricks notebooks, return early. + if _, ok := extensionsToLanguages[ext]; !ok { + return nil, nil + } + + // If the file could be a notebook, check if it is and has the correct language. + stat, err := w.stat(ctx, nameWithoutExt) + if err != nil { + // If the file does not exist, return early. + if errors.As(err, &FileDoesNotExistError{}) { + return nil, nil + } + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Failed to fetch the status of object at %s: %s", name, path.Join(w.root, nameWithoutExt), err) + return nil, err + } + + // Not a notebook. Return early. + if stat.ObjectType != workspace.ObjectTypeNotebook { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found an object at %s but it is not a notebook. It is a %s.", name, path.Join(w.root, nameWithoutExt), stat.ObjectType) + return nil, nil + } + + // Not the correct language. Return early. + if stat.Language != extensionsToLanguages[ext] { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not of the correct language. Expected %s but found %s.", name, path.Join(w.root, nameWithoutExt), extensionsToLanguages[ext], stat.Language) + return nil, nil + } + + // When the extension is .py we expect the export format to be source. + // If it's not, return early. 
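+ // A notebook exported in the Jupyter format is matched by the .ipynb extension instead.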
+ if ext == ".py" && stat.ReposExportFormat != workspace.ExportFormatSource { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a source notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) + return nil, nil + } + + // When the extension is .ipynb we expect the export format to be Jupyter. + // If it's not, return early. + if ext == ".ipynb" && stat.ReposExportFormat != workspace.ExportFormatJupyter { + log.Debugf(ctx, "attempting to determine if %s could be a notebook. Found a notebook at %s but it is not exported as a Jupyter notebook. Its export format is %s.", name, path.Join(w.root, nameWithoutExt), stat.ReposExportFormat) + return nil, nil + } + + // Modify the stat object path to include the extension. This stat object will be used + // to return the fs.FileInfo object in the stat method. + stat.Path = stat.Path + ext + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: nameWithoutExt, + }, nil +} + +func (w *workspaceFilesExtensionsClient) getNotebookStatByNameWithoutExt(ctx context.Context, name string) (*workspaceFileStatus, error) { + stat, err := w.stat(ctx, name) + if err != nil { + return nil, err + } + + // We expect this internal function to only be called from [ReadDir] when we are sure + // that the object is a notebook. Thus, this should never happen. + if stat.ObjectType != workspace.ObjectTypeNotebook { + return nil, fmt.Errorf("expected object at %s to be a notebook but it is a %s", path.Join(w.root, name), stat.ObjectType) + } + + // Get the extension for the notebook. + ext := notebook.GetExtensionByLanguage(&stat.ObjectInfo) + + // If the notebook was exported as a Jupyter notebook, the extension should be .ipynb. + if stat.Language == workspace.LanguagePython && stat.ReposExportFormat == workspace.ExportFormatJupyter { + ext = ".ipynb" + } + + // Modify the stat object path to include the extension. This stat object will be used + // to return the fs.DirEntry object in the ReadDir method. + stat.Path = stat.Path + ext + return &workspaceFileStatus{ + wsfsFileInfo: stat, + nameForWorkspaceAPI: name, + }, nil +} + +type DuplicatePathError struct { + oi1 workspace.ObjectInfo + oi2 workspace.ObjectInfo + + commonName string +} + +func (e DuplicatePathError) Error() string { + return fmt.Sprintf("failed to read files from the workspace file system. Duplicate paths encountered. Both %s at %s and %s at %s resolve to the same name %s. Changing the name of one of these objects will resolve this issue", e.oi1.ObjectType, e.oi1.Path, e.oi2.ObjectType, e.oi2.Path, e.commonName) +} + +// This is a filer for the workspace file system that allows you to pretend the +// workspace file system is a traditional file system. It allows you to list, read, write, +// delete, and stat notebooks (and files in general) in the workspace, using their paths +// with the extension included. +// +// The ReadDir method returns a DuplicatePathError if this traditional file system view is +// not possible. For example, a Python notebook called foo and a Python file called `foo.py` +// would resolve to the same path `foo.py` in a tradition file system. +// +// Users of this filer should be careful when using the Write and Mkdir methods. +// The underlying import API we use to upload notebooks and files returns opaque internal +// errors for namespace clashes (e.g. a file and a notebook or a directory and a notebook). 
+// Thus users of these methods should be careful to avoid such clashes. +func NewWorkspaceFilesExtensionsClient(w *databricks.WorkspaceClient, root string) (Filer, error) { + filer, err := NewWorkspaceFilesClient(w, root) + if err != nil { + return nil, err + } + + return &workspaceFilesExtensionsClient{ + workspaceClient: w, + + wsfs: filer, + root: root, + }, nil +} + +func (w *workspaceFilesExtensionsClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + entries, err := w.wsfs.ReadDir(ctx, name) + if err != nil { + return nil, err + } + + seenPaths := make(map[string]workspace.ObjectInfo) + for i := range entries { + info, err := entries[i].Info() + if err != nil { + return nil, err + } + sysInfo := info.Sys().(workspace.ObjectInfo) + + // If the object is a notebook, include an extension in the entry. + if sysInfo.ObjectType == workspace.ObjectTypeNotebook { + stat, err := w.getNotebookStatByNameWithoutExt(ctx, path.Join(name, entries[i].Name())) + if err != nil { + return nil, err + } + // Replace the entry with the new entry that includes the extension. + entries[i] = wsfsDirEntry{wsfsFileInfo{ObjectInfo: stat.ObjectInfo}} + } + + // Error if we have seen this path before in the current directory. + // If not seen before, add it to the seen paths. + if _, ok := seenPaths[entries[i].Name()]; ok { + return nil, DuplicatePathError{ + oi1: seenPaths[entries[i].Name()], + oi2: sysInfo, + commonName: path.Join(name, entries[i].Name()), + } + } + seenPaths[entries[i].Name()] = sysInfo + } + + return entries, nil +} + +// Note: The import API returns opaque internal errors for namespace clashes +// (e.g. a file and a notebook or a directory and a notebook). Thus users of this +// method should be careful to avoid such clashes. +func (w *workspaceFilesExtensionsClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { + return w.wsfs.Write(ctx, name, reader, mode...) +} + +// Try to read the file as a regular file. If the file is not found, try to read it as a notebook. +func (w *workspaceFilesExtensionsClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { + r, err := w.wsfs.Read(ctx, name) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return nil, serr + } + if stat == nil { + // Not a notebook. Return the original error. + return nil, err + } + + // The workspace files filer performs an additional stat call to make sure + // the path is not a directory. We can skip this step since we already have + // the stat object and know that the path is a notebook. + return w.workspaceClient.Workspace.Download( + ctx, + path.Join(w.root, stat.nameForWorkspaceAPI), + workspace.DownloadFormat(stat.ReposExportFormat), + ) + } + return r, err +} + +// Try to delete the file as a regular file. If the file is not found, try to delete it as a notebook. +func (w *workspaceFilesExtensionsClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { + err := w.wsfs.Delete(ctx, name, mode...) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return serr + } + if stat == nil { + // Not a notebook. Return the original error. 
+ return err + } + + return w.wsfs.Delete(ctx, stat.nameForWorkspaceAPI, mode...) + } + + return err +} + +// Try to stat the file as a regular file. If the file is not found, try to stat it as a notebook. +func (w *workspaceFilesExtensionsClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + info, err := w.wsfs.Stat(ctx, name) + + // If the file is not found, it might be a notebook. + if errors.As(err, &FileDoesNotExistError{}) { + stat, serr := w.getNotebookStatByNameWithExt(ctx, name) + if serr != nil { + // Unable to stat. Return the stat error. + return nil, serr + } + if stat == nil { + // Not a notebook. Return the original error. + return nil, err + } + + return wsfsFileInfo{ObjectInfo: stat.ObjectInfo}, nil + } + + return info, err +} + +// Note: The import API returns opaque internal errors for namespace clashes +// (e.g. a file and a notebook or a directory and a notebook). Thus users of this +// method should be careful to avoid such clashes. +func (w *workspaceFilesExtensionsClient) Mkdir(ctx context.Context, name string) error { + return w.wsfs.Mkdir(ctx, name) +} diff --git a/libs/fileset/file.go b/libs/fileset/file.go index 6594de4ed..fd846b257 100644 --- a/libs/fileset/file.go +++ b/libs/fileset/file.go @@ -3,18 +3,84 @@ package fileset import ( "io/fs" "time" + + "github.com/databricks/cli/libs/notebook" + "github.com/databricks/cli/libs/vfs" +) + +type fileType int + +const ( + Unknown fileType = iota + Notebook // Databricks notebook file + Source // Any other file type ) type File struct { - fs.DirEntry - Absolute, Relative string + // Root path of the fileset. + root vfs.Path + + // File entry as returned by the [fs.WalkDir] function. + entry fs.DirEntry + + // Type of the file. + fileType fileType + + // Relative path within the fileset. + // Combine with the [vfs.Path] to interact with the underlying file. + Relative string +} + +func NewNotebookFile(root vfs.Path, entry fs.DirEntry, relative string) File { + return File{ + root: root, + entry: entry, + fileType: Notebook, + Relative: relative, + } +} + +func NewFile(root vfs.Path, entry fs.DirEntry, relative string) File { + return File{ + root: root, + entry: entry, + fileType: Unknown, + Relative: relative, + } +} + +func NewSourceFile(root vfs.Path, entry fs.DirEntry, relative string) File { + return File{ + root: root, + entry: entry, + fileType: Source, + Relative: relative, + } } func (f File) Modified() (ts time.Time) { - info, err := f.Info() + info, err := f.entry.Info() if err != nil { // return default time, beginning of epoch return ts } return info.ModTime() } + +func (f *File) IsNotebook() (bool, error) { + if f.fileType != Unknown { + return f.fileType == Notebook, nil + } + + // Otherwise, detect the notebook type. 
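+ // The result is cached in f.fileType so detection only runs once per file.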
+ isNotebook, _, err := notebook.DetectWithFS(f.root, f.Relative) + if err != nil { + return false, err + } + if isNotebook { + f.fileType = Notebook + } else { + f.fileType = Source + } + return isNotebook, nil +} diff --git a/libs/fileset/file_test.go b/libs/fileset/file_test.go new file mode 100644 index 000000000..1ce1ff59a --- /dev/null +++ b/libs/fileset/file_test.go @@ -0,0 +1,44 @@ +package fileset + +import ( + "testing" + + "github.com/databricks/cli/internal/testutil" + "github.com/databricks/cli/libs/vfs" + "github.com/stretchr/testify/require" +) + +func TestNotebookFileIsNotebook(t *testing.T) { + f := NewNotebookFile(nil, nil, "") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.True(t, isNotebook) +} + +func TestSourceFileIsNotNotebook(t *testing.T) { + f := NewSourceFile(nil, nil, "") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.False(t, isNotebook) +} + +func TestUnknownFileDetectsNotebook(t *testing.T) { + tmpDir := t.TempDir() + root := vfs.MustNew(tmpDir) + + t.Run("file", func(t *testing.T) { + testutil.Touch(t, tmpDir, "test.py") + f := NewFile(root, nil, "test.py") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.False(t, isNotebook) + }) + + t.Run("notebook", func(t *testing.T) { + testutil.TouchNotebook(t, tmpDir, "notebook.py") + f := NewFile(root, nil, "notebook.py") + isNotebook, err := f.IsNotebook() + require.NoError(t, err) + require.True(t, isNotebook) + }) +} diff --git a/libs/fileset/fileset.go b/libs/fileset/fileset.go index 81b85525c..d0f00f97a 100644 --- a/libs/fileset/fileset.go +++ b/libs/fileset/fileset.go @@ -4,20 +4,24 @@ import ( "fmt" "io/fs" "os" - "path/filepath" + + "github.com/databricks/cli/libs/vfs" ) // FileSet facilitates fast recursive file listing of a path. // It optionally takes into account ignore rules through the [Ignorer] interface. type FileSet struct { - root string + // Root path of the fileset. + root vfs.Path + + // Ignorer interface to check if a file or directory should be ignored. ignore Ignorer } // New returns a [FileSet] for the given root path. -func New(root string) *FileSet { +func New(root vfs.Path) *FileSet { return &FileSet{ - root: filepath.Clean(root), + root: root, ignore: nopIgnorer{}, } } @@ -32,11 +36,6 @@ func (w *FileSet) SetIgnorer(ignore Ignorer) { w.ignore = ignore } -// Return root for fileset. 
-func (w *FileSet) Root() string { - return w.root -} - // Return all tracked files for Repo func (w *FileSet) All() ([]File, error) { return w.recursiveListFiles() @@ -46,12 +45,7 @@ func (w *FileSet) All() ([]File, error) { // that are being tracked in the FileSet (ie not being ignored for matching one of the // patterns in w.ignore) func (w *FileSet) recursiveListFiles() (fileList []File, err error) { - err = filepath.WalkDir(w.root, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return err - } - - relPath, err := filepath.Rel(w.root, path) + err = fs.WalkDir(w.root, ".", func(name string, d fs.DirEntry, err error) error { if err != nil { return err } @@ -66,25 +60,25 @@ func (w *FileSet) recursiveListFiles() (fileList []File, err error) { } if d.IsDir() { - ign, err := w.ignore.IgnoreDirectory(relPath) + ign, err := w.ignore.IgnoreDirectory(name) if err != nil { - return fmt.Errorf("cannot check if %s should be ignored: %w", relPath, err) + return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) } if ign { - return filepath.SkipDir + return fs.SkipDir } return nil } - ign, err := w.ignore.IgnoreFile(relPath) + ign, err := w.ignore.IgnoreFile(name) if err != nil { - return fmt.Errorf("cannot check if %s should be ignored: %w", relPath, err) + return fmt.Errorf("cannot check if %s should be ignored: %w", name, err) } if ign { return nil } - fileList = append(fileList, File{d, path, relPath}) + fileList = append(fileList, NewFile(w.root, d, name)) return nil }) return diff --git a/libs/fileset/glob.go b/libs/fileset/glob.go index 9d8626e54..0a1038472 100644 --- a/libs/fileset/glob.go +++ b/libs/fileset/glob.go @@ -1,22 +1,17 @@ package fileset import ( - "path/filepath" + "path" + + "github.com/databricks/cli/libs/vfs" ) -func NewGlobSet(root string, includes []string) (*FileSet, error) { - absRoot, err := filepath.Abs(root) - if err != nil { - return nil, err - } - +func NewGlobSet(root vfs.Path, includes []string) (*FileSet, error) { for k := range includes { - includes[k] = filepath.ToSlash(filepath.Clean(includes[k])) + includes[k] = path.Clean(includes[k]) } - fs := &FileSet{ - absRoot, - newIncluder(includes), - } + fs := New(root) + fs.SetIgnorer(newIncluder(includes)) return fs, nil } diff --git a/libs/fileset/glob_test.go b/libs/fileset/glob_test.go index e8d3696c4..70b9c444b 100644 --- a/libs/fileset/glob_test.go +++ b/libs/fileset/glob_test.go @@ -2,21 +2,26 @@ package fileset import ( "io/fs" - "os" - "path/filepath" + "path" "slices" "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) -func TestGlobFileset(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "filer") +func collectRelativePaths(files []File) []string { + relativePaths := make([]string, 0) + for _, f := range files { + relativePaths = append(relativePaths, f.Relative) + } + return relativePaths +} - entries, err := os.ReadDir(root) +func TestGlobFileset(t *testing.T) { + root := vfs.MustNew("../filer") + entries, err := root.ReadDir(".") require.NoError(t, err) g, err := NewGlobSet(root, []string{ @@ -30,7 +35,7 @@ func TestGlobFileset(t *testing.T) { require.Equal(t, len(files), len(entries)) for _, f := range files { exists := slices.ContainsFunc(entries, func(de fs.DirEntry) bool { - return de.Name() == f.Name() + return de.Name() == path.Base(f.Relative) }) require.True(t, exists) } @@ -46,9 +51,8 @@ func TestGlobFileset(t *testing.T) { } func 
TestGlobFilesetWithRelativeRoot(t *testing.T) { - root := filepath.Join("..", "filer") - - entries, err := os.ReadDir(root) + root := vfs.MustNew("../filer") + entries, err := root.ReadDir(".") require.NoError(t, err) g, err := NewGlobSet(root, []string{ @@ -58,21 +62,14 @@ func TestGlobFilesetWithRelativeRoot(t *testing.T) { files, err := g.All() require.NoError(t, err) - require.Equal(t, len(files), len(entries)) - for _, f := range files { - require.True(t, filepath.IsAbs(f.Absolute)) - } } func TestGlobFilesetRecursively(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error { - if !info.IsDir() { + err := fs.WalkDir(root, "testdata", func(path string, d fs.DirEntry, err error) error { + if !d.IsDir() { entries = append(entries, path) } return nil @@ -86,24 +83,14 @@ func TestGlobFilesetRecursively(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } func TestGlobFilesetDir(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata", "a"), func(path string, info fs.FileInfo, err error) error { - if !info.IsDir() { + err := fs.WalkDir(root, "testdata/a", func(path string, d fs.DirEntry, err error) error { + if !d.IsDir() { entries = append(entries, path) } return nil @@ -117,23 +104,13 @@ func TestGlobFilesetDir(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) { - cwd, err := os.Getwd() - require.NoError(t, err) - root := filepath.Join(cwd, "..", "git") - + root := vfs.MustNew("../git") entries := make([]string, 0) - err = filepath.Walk(filepath.Join(root, "testdata"), func(path string, info fs.FileInfo, err error) error { + err := fs.WalkDir(root, "testdata", func(path string, d fs.DirEntry, err error) error { if strings.HasSuffix(path, ".txt") { entries = append(entries, path) } @@ -148,12 +125,5 @@ func TestGlobFilesetDoubleQuotesWithFilePatterns(t *testing.T) { files, err := g.All() require.NoError(t, err) - - require.Equal(t, len(files), len(entries)) - for _, f := range files { - exists := slices.ContainsFunc(entries, func(path string) bool { - return path == f.Absolute - }) - require.True(t, exists) - } + require.ElementsMatch(t, entries, collectRelativePaths(files)) } diff --git a/folders/folders.go b/libs/folders/folders.go similarity index 100% rename from folders/folders.go rename to libs/folders/folders.go diff --git a/folders/folders_test.go b/libs/folders/folders_test.go similarity index 94% rename from folders/folders_test.go rename to libs/folders/folders_test.go index 9aa387070..17afc4022 100644 --- a/folders/folders_test.go +++ b/libs/folders/folders_test.go @@ -13,7 
+13,7 @@ func TestFindDirWithLeaf(t *testing.T) { wd, err := os.Getwd() require.NoError(t, err) - root := filepath.Join(wd, "..") + root := filepath.Join(wd, "..", "..") // Find from working directory should work. { diff --git a/libs/git/config.go b/libs/git/config.go index e83c75b7b..fafd81bd6 100644 --- a/libs/git/config.go +++ b/libs/git/config.go @@ -1,13 +1,16 @@ package git import ( + "errors" "fmt" "io" + "io/fs" "os" "path/filepath" "regexp" "strings" + "github.com/databricks/cli/libs/vfs" "gopkg.in/ini.v1" ) @@ -87,12 +90,12 @@ func (c config) load(r io.Reader) error { return nil } -func (c config) loadFile(path string) error { - f, err := os.Open(path) +func (c config) loadFile(root vfs.Path, path string) error { + f, err := root.Open(path) if err != nil { // If the file doesn't exist it is ignored. // This is the case for both global and repository specific config files. - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return err @@ -129,7 +132,7 @@ func (c config) coreExcludesFile() (string, error) { // If there are other problems accessing this file we would // run into them at a later point anyway. _, err := os.Stat(path) - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return "", err } @@ -152,8 +155,8 @@ func globalGitConfig() (*config, error) { // > are missing or unreadable they will be ignored. // // We therefore ignore the error return value for the calls below. - config.loadFile(filepath.Join(xdgConfigHome, "git/config")) - config.loadFile(filepath.Join(config.home, ".gitconfig")) + config.loadFile(vfs.MustNew(xdgConfigHome), "git/config") + config.loadFile(vfs.MustNew(config.home), ".gitconfig") return config, nil } diff --git a/libs/git/fileset.go b/libs/git/fileset.go index c604ac7fa..f1986aa20 100644 --- a/libs/git/fileset.go +++ b/libs/git/fileset.go @@ -2,6 +2,7 @@ package git import ( "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" ) // FileSet is Git repository aware implementation of [fileset.FileSet]. @@ -13,7 +14,7 @@ type FileSet struct { } // NewFileSet returns [FileSet] for the Git repository located at `root`. 
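+// All file access goes through the provided [vfs.Path] rather than the local file system.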
-func NewFileSet(root string) (*FileSet, error) { +func NewFileSet(root vfs.Path) (*FileSet, error) { fs := fileset.New(root) v, err := NewView(root) if err != nil { @@ -34,10 +35,6 @@ func (f *FileSet) IgnoreDirectory(dir string) (bool, error) { return f.view.IgnoreDirectory(dir) } -func (f *FileSet) Root() string { - return f.fileset.Root() -} - func (f *FileSet) All() ([]fileset.File, error) { f.view.repo.taintIgnoreRules() return f.fileset.All() diff --git a/libs/git/fileset_test.go b/libs/git/fileset_test.go index 74133f525..4e6172bfd 100644 --- a/libs/git/fileset_test.go +++ b/libs/git/fileset_test.go @@ -2,23 +2,25 @@ package git import ( "os" + "path" "path/filepath" "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) -func testFileSetAll(t *testing.T, path string) { - fileSet, err := NewFileSet(path) +func testFileSetAll(t *testing.T, root string) { + fileSet, err := NewFileSet(vfs.MustNew(root)) require.NoError(t, err) files, err := fileSet.All() require.NoError(t, err) require.Len(t, files, 3) - assert.Equal(t, filepath.Join("a", "b", "world.txt"), files[0].Relative) - assert.Equal(t, filepath.Join("a", "hello.txt"), files[1].Relative) - assert.Equal(t, filepath.Join("databricks.yml"), files[2].Relative) + assert.Equal(t, path.Join("a", "b", "world.txt"), files[0].Relative) + assert.Equal(t, path.Join("a", "hello.txt"), files[1].Relative) + assert.Equal(t, path.Join("databricks.yml"), files[2].Relative) } func TestFileSetListAllInRepo(t *testing.T) { @@ -33,7 +35,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { // Test what happens if the root directory can be simplified. // Path simplification is done by most filepath functions. // This should yield the same result as above test. - fileSet, err := NewFileSet("./testdata/../testdata") + fileSet, err := NewFileSet(vfs.MustNew("./testdata/../testdata")) require.NoError(t, err) files, err := fileSet.All() require.NoError(t, err) @@ -42,7 +44,7 @@ func TestFileSetNonCleanRoot(t *testing.T) { func TestFileSetAddsCacheDirToGitIgnore(t *testing.T) { projectDir := t.TempDir() - fileSet, err := NewFileSet(projectDir) + fileSet, err := NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) fileSet.EnsureValidGitIgnoreExists() @@ -57,7 +59,7 @@ func TestFileSetDoesNotCacheDirToGitIgnoreIfAlreadyPresent(t *testing.T) { projectDir := t.TempDir() gitIgnorePath := filepath.Join(projectDir, ".gitignore") - fileSet, err := NewFileSet(projectDir) + fileSet, err := NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) err = os.WriteFile(gitIgnorePath, []byte(".databricks"), 0o644) require.NoError(t, err) diff --git a/libs/git/ignore.go b/libs/git/ignore.go index ec66a2b23..9f501e472 100644 --- a/libs/git/ignore.go +++ b/libs/git/ignore.go @@ -1,9 +1,12 @@ package git import ( - "os" + "errors" + "io/fs" + "strings" "time" + "github.com/databricks/cli/libs/vfs" ignore "github.com/sabhiram/go-gitignore" ) @@ -21,7 +24,8 @@ type ignoreRules interface { // ignoreFile represents a gitignore file backed by a path. // If the path doesn't exist (yet), it is treated as an empty file. type ignoreFile struct { - absPath string + root vfs.Path + path string // Signal a reload of this file. 
// Set this to call [os.Stat] and a potential reload @@ -35,9 +39,10 @@ type ignoreFile struct { patterns *ignore.GitIgnore } -func newIgnoreFile(absPath string) ignoreRules { +func newIgnoreFile(root vfs.Path, path string) ignoreRules { return &ignoreFile{ - absPath: absPath, + root: root, + path: path, checkForReload: true, } } @@ -67,9 +72,9 @@ func (f *ignoreFile) Taint() { func (f *ignoreFile) load() error { // The file must be stat-able. // If it doesn't exist, treat it as an empty file. - stat, err := os.Stat(f.absPath) + stat, err := fs.Stat(f.root, f.path) if err != nil { - if os.IsNotExist(err) { + if errors.Is(err, fs.ErrNotExist) { return nil } return err @@ -82,7 +87,7 @@ func (f *ignoreFile) load() error { } f.modTime = stat.ModTime() - f.patterns, err = ignore.CompileIgnoreFile(f.absPath) + f.patterns, err = f.loadGitignore() if err != nil { return err } @@ -90,6 +95,16 @@ func (f *ignoreFile) load() error { return nil } +func (f *ignoreFile) loadGitignore() (*ignore.GitIgnore, error) { + data, err := fs.ReadFile(f.root, f.path) + if err != nil { + return nil, err + } + + lines := strings.Split(string(data), "\n") + return ignore.CompileIgnoreLines(lines...), nil +} + // stringIgnoreRules implements the [ignoreRules] interface // for a set of in-memory ignore patterns. type stringIgnoreRules struct { diff --git a/libs/git/ignore_test.go b/libs/git/ignore_test.go index 160f53d7b..057c0cb2e 100644 --- a/libs/git/ignore_test.go +++ b/libs/git/ignore_test.go @@ -5,6 +5,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -13,7 +14,7 @@ func TestIgnoreFile(t *testing.T) { var ign bool var err error - f := newIgnoreFile("./testdata/.gitignore") + f := newIgnoreFile(vfs.MustNew("testdata"), ".gitignore") ign, err = f.MatchesPath("root.foo") require.NoError(t, err) assert.True(t, ign) @@ -27,7 +28,7 @@ func TestIgnoreFileDoesntExist(t *testing.T) { var err error // Files that don't exist are treated as an empty gitignore file. - f := newIgnoreFile("./testdata/thispathdoesntexist") + f := newIgnoreFile(vfs.MustNew("testdata"), "thispathdoesntexist") ign, err = f.MatchesPath("i'm included") require.NoError(t, err) assert.False(t, ign) @@ -41,7 +42,7 @@ func TestIgnoreFileTaint(t *testing.T) { gitIgnorePath := filepath.Join(tempDir, ".gitignore") // Files that don't exist are treated as an empty gitignore file. 
- f := newIgnoreFile(gitIgnorePath) + f := newIgnoreFile(vfs.MustNew(tempDir), ".gitignore") ign, err = f.MatchesPath("hello") require.NoError(t, err) assert.False(t, ign) diff --git a/libs/git/reference.go b/libs/git/reference.go index 4021f2e60..2165a9cda 100644 --- a/libs/git/reference.go +++ b/libs/git/reference.go @@ -1,11 +1,13 @@ package git import ( + "errors" "fmt" - "os" - "path/filepath" + "io/fs" "regexp" "strings" + + "github.com/databricks/cli/libs/vfs" ) type ReferenceType string @@ -37,10 +39,10 @@ func isSHA1(s string) bool { return re.MatchString(s) } -func LoadReferenceFile(path string) (*Reference, error) { +func LoadReferenceFile(root vfs.Path, path string) (*Reference, error) { // read reference file content - b, err := os.ReadFile(path) - if os.IsNotExist(err) { + b, err := fs.ReadFile(root, path) + if errors.Is(err, fs.ErrNotExist) { return nil, nil } if err != nil { @@ -73,8 +75,7 @@ func (ref *Reference) ResolvePath() (string, error) { if ref.Type != ReferenceTypePointer { return "", ErrNotAReferencePointer } - refPath := strings.TrimPrefix(ref.Content, ReferencePrefix) - return filepath.FromSlash(refPath), nil + return strings.TrimPrefix(ref.Content, ReferencePrefix), nil } // resolves the name of the current branch from the reference file content. For example @@ -87,8 +88,6 @@ func (ref *Reference) CurrentBranch() (string, error) { if err != nil { return "", err } - // normalize branch ref path to work accross different operating systems - branchRefPath = filepath.ToSlash(branchRefPath) if !strings.HasPrefix(branchRefPath, HeadPathPrefix) { return "", fmt.Errorf("reference path %s does not have expected prefix %s", branchRefPath, HeadPathPrefix) } diff --git a/libs/git/reference_test.go b/libs/git/reference_test.go index 1b08e989b..194d79333 100644 --- a/libs/git/reference_test.go +++ b/libs/git/reference_test.go @@ -6,6 +6,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -45,7 +46,7 @@ func TestReferenceReferencePathForReference(t *testing.T) { } path, err := ref.ResolvePath() assert.NoError(t, err) - assert.Equal(t, filepath.FromSlash("refs/heads/my-branch"), path) + assert.Equal(t, "refs/heads/my-branch", path) } func TestReferenceLoadingForObjectID(t *testing.T) { @@ -55,7 +56,7 @@ func TestReferenceLoadingForObjectID(t *testing.T) { defer f.Close() f.WriteString(strings.Repeat("e", 40) + "\r\n") - ref, err := LoadReferenceFile(filepath.Join(tmp, "HEAD")) + ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) assert.Equal(t, ReferenceTypeSHA1, ref.Type) assert.Equal(t, strings.Repeat("e", 40), ref.Content) @@ -68,7 +69,7 @@ func TestReferenceLoadingForReference(t *testing.T) { defer f.Close() f.WriteString("ref: refs/heads/foo\n") - ref, err := LoadReferenceFile(filepath.Join(tmp, "HEAD")) + ref, err := LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.NoError(t, err) assert.Equal(t, ReferenceTypePointer, ref.Type) assert.Equal(t, "ref: refs/heads/foo", ref.Content) @@ -81,7 +82,7 @@ func TestReferenceLoadingFailsForInvalidContent(t *testing.T) { defer f.Close() f.WriteString("abc") - _, err = LoadReferenceFile(filepath.Join(tmp, "HEAD")) + _, err = LoadReferenceFile(vfs.MustNew(tmp), "HEAD") assert.ErrorContains(t, err, "unknown format for git HEAD") } diff --git a/libs/git/repository.go b/libs/git/repository.go index d1641118f..6940ddac8 100644 --- a/libs/git/repository.go +++ b/libs/git/repository.go @@ -1,13 +1,15 @@ package 
git import ( + "errors" "fmt" - "os" + "io/fs" + "net/url" "path" "path/filepath" "strings" - "github.com/databricks/cli/folders" + "github.com/databricks/cli/libs/vfs" ) const gitIgnoreFileName = ".gitignore" @@ -21,8 +23,8 @@ type Repository struct { // directory where we process .gitignore files. real bool - // rootPath is the absolute path to the repository root. - rootPath string + // root is the absolute path to the repository root. + root vfs.Path // ignore contains a list of ignore patterns indexed by the // path prefix relative to the repository root. @@ -42,12 +44,12 @@ type Repository struct { // Root returns the absolute path to the repository root. func (r *Repository) Root() string { - return r.rootPath + return r.root.Native() } func (r *Repository) CurrentBranch() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -64,7 +66,7 @@ func (r *Repository) CurrentBranch() (string, error) { func (r *Repository) LatestCommit() (string, error) { // load .git/HEAD - ref, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, "HEAD")) + ref, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, "HEAD")) if err != nil { return "", err } @@ -83,7 +85,7 @@ func (r *Repository) LatestCommit() (string, error) { if err != nil { return "", err } - branchHeadRef, err := LoadReferenceFile(filepath.Join(r.rootPath, GitDirectoryName, branchHeadPath)) + branchHeadRef, err := LoadReferenceFile(r.root, path.Join(GitDirectoryName, branchHeadPath)) if err != nil { return "", err } @@ -99,7 +101,22 @@ func (r *Repository) LatestCommit() (string, error) { // return origin url if it's defined, otherwise an empty string func (r *Repository) OriginUrl() string { - return r.config.variables["remote.origin.url"] + rawUrl := r.config.variables["remote.origin.url"] + + // Remove username and password from the URL. + parsedUrl, err := url.Parse(rawUrl) + if err != nil { + // Git supports https URLs and non standard URLs like "ssh://" or "file://". + // Parsing these URLs is not supported by the Go standard library. In case + // of an error, we return the raw URL. This is okay because for ssh URLs + // because passwords cannot be included in the URL. + return rawUrl + } + // Setting User to nil removes the username and password from the URL when + // .String() is called. + // See: https://pkg.go.dev/net/url#URL.String + parsedUrl.User = nil + return parsedUrl.String() } // loadConfig loads and combines user specific and repository specific configuration files. @@ -108,7 +125,7 @@ func (r *Repository) loadConfig() error { if err != nil { return fmt.Errorf("unable to load user specific gitconfig: %w", err) } - err = config.loadFile(filepath.Join(r.rootPath, ".git/config")) + err = config.loadFile(r.root, ".git/config") if err != nil { return fmt.Errorf("unable to load repository specific gitconfig: %w", err) } @@ -119,7 +136,7 @@ func (r *Repository) loadConfig() error { // newIgnoreFile constructs a new [ignoreRules] implementation backed by // a file using the specified path relative to the repository root. 
func (r *Repository) newIgnoreFile(relativeIgnoreFilePath string) ignoreRules { - return newIgnoreFile(filepath.Join(r.rootPath, relativeIgnoreFilePath)) + return newIgnoreFile(r.root, relativeIgnoreFilePath) } // getIgnoreRules returns a slice of [ignoreRules] that apply @@ -132,7 +149,7 @@ func (r *Repository) getIgnoreRules(prefix string) []ignoreRules { return fs } - r.ignore[prefix] = append(r.ignore[prefix], r.newIgnoreFile(filepath.Join(prefix, gitIgnoreFileName))) + r.ignore[prefix] = append(r.ignore[prefix], r.newIgnoreFile(path.Join(prefix, gitIgnoreFileName))) return r.ignore[prefix] } @@ -149,7 +166,7 @@ func (r *Repository) taintIgnoreRules() { // Ignore computes whether to ignore the specified path. // The specified path is relative to the repository root path. func (r *Repository) Ignore(relPath string) (bool, error) { - parts := strings.Split(filepath.ToSlash(relPath), "/") + parts := strings.Split(relPath, "/") // Retain trailing slash for directory patterns. // We know a trailing slash was present if the last element @@ -186,16 +203,11 @@ func (r *Repository) Ignore(relPath string) (bool, error) { return false, nil } -func NewRepository(path string) (*Repository, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - +func NewRepository(path vfs.Path) (*Repository, error) { real := true - rootPath, err := folders.FindDirWithLeaf(path, GitDirectoryName) + rootPath, err := vfs.FindLeafInTree(path, GitDirectoryName) if err != nil { - if !os.IsNotExist(err) { + if !errors.Is(err, fs.ErrNotExist) { return nil, err } // Cannot find `.git` directory. @@ -205,9 +217,9 @@ func NewRepository(path string) (*Repository, error) { } repo := &Repository{ - real: real, - rootPath: rootPath, - ignore: make(map[string][]ignoreRules), + real: real, + root: rootPath, + ignore: make(map[string][]ignoreRules), } err = repo.loadConfig() @@ -221,13 +233,21 @@ func NewRepository(path string) (*Repository, error) { return nil, fmt.Errorf("unable to access core excludes file: %w", err) } + // Load global excludes on this machine. + // This is by definition a local path so we create a new [vfs.Path] instance. + coreExcludes := newStringIgnoreRules([]string{}) + if coreExcludesPath != "" { + dir := filepath.Dir(coreExcludesPath) + base := filepath.Base(coreExcludesPath) + coreExcludes = newIgnoreFile(vfs.MustNew(dir), base) + } + // Initialize root ignore rules. // These are special and not lazily initialized because: // 1) we include a hardcoded ignore pattern // 2) we include a gitignore file at a non-standard path repo.ignore["."] = []ignoreRules{ - // Load global excludes on this machine. - newIgnoreFile(coreExcludesPath), + coreExcludes, // Always ignore root .git directory. 
newStringIgnoreRules([]string{ ".git", diff --git a/libs/git/repository_test.go b/libs/git/repository_test.go index fb0e38080..a28038eeb 100644 --- a/libs/git/repository_test.go +++ b/libs/git/repository_test.go @@ -7,6 +7,7 @@ import ( "strings" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -43,7 +44,7 @@ func newTestRepository(t *testing.T) *testRepository { _, err = f2.WriteString(`ref: refs/heads/main`) require.NoError(t, err) - repo, err := NewRepository(tmp) + repo, err := NewRepository(vfs.MustNew(tmp)) require.NoError(t, err) return &testRepository{ @@ -53,7 +54,7 @@ func newTestRepository(t *testing.T) *testRepository { } func (testRepo *testRepository) checkoutCommit(commitId string) { - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) require.NoError(testRepo.t, err) defer f.Close() @@ -63,7 +64,7 @@ func (testRepo *testRepository) checkoutCommit(commitId string) { func (testRepo *testRepository) addBranch(name string, latestCommit string) { // create dir for branch head reference - branchDir := filepath.Join(testRepo.r.rootPath, ".git", "refs", "heads") + branchDir := filepath.Join(testRepo.r.Root(), ".git", "refs", "heads") err := os.MkdirAll(branchDir, os.ModePerm) require.NoError(testRepo.t, err) @@ -78,7 +79,7 @@ func (testRepo *testRepository) addBranch(name string, latestCommit string) { } func (testRepo *testRepository) checkoutBranch(name string) { - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "HEAD"), os.O_WRONLY|os.O_TRUNC, os.ModePerm) require.NoError(testRepo.t, err) defer f.Close() @@ -89,7 +90,7 @@ func (testRepo *testRepository) checkoutBranch(name string) { // add remote origin url to test repo func (testRepo *testRepository) addOriginUrl(url string) { // open config in append mode - f, err := os.OpenFile(filepath.Join(testRepo.r.rootPath, ".git", "config"), os.O_WRONLY|os.O_APPEND, os.ModePerm) + f, err := os.OpenFile(filepath.Join(testRepo.r.Root(), ".git", "config"), os.O_WRONLY|os.O_APPEND, os.ModePerm) require.NoError(testRepo.t, err) defer f.Close() @@ -128,7 +129,7 @@ func (testRepo *testRepository) assertOriginUrl(expected string) { func TestRepository(t *testing.T) { // Load this repository as test. - repo, err := NewRepository("../..") + repo, err := NewRepository(vfs.MustNew("../..")) tr := testRepository{t, repo} require.NoError(t, err) @@ -142,7 +143,7 @@ func TestRepository(t *testing.T) { assert.True(t, tr.Ignore("vendor/")) // Check that ignores under testdata work. 
- assert.True(t, tr.Ignore(filepath.Join("libs", "git", "testdata", "root.ignoreme"))) + assert.True(t, tr.Ignore("libs/git/testdata/root.ignoreme")) } func TestRepositoryGitConfigForEmptyRepo(t *testing.T) { @@ -192,7 +193,7 @@ func TestRepositoryGitConfigForSshUrl(t *testing.T) { func TestRepositoryGitConfigWhenNotARepo(t *testing.T) { tmp := t.TempDir() - repo, err := NewRepository(tmp) + repo, err := NewRepository(vfs.MustNew(tmp)) require.NoError(t, err) branch, err := repo.CurrentBranch() @@ -206,3 +207,9 @@ func TestRepositoryGitConfigWhenNotARepo(t *testing.T) { originUrl := repo.OriginUrl() assert.Equal(t, "", originUrl) } + +func TestRepositoryOriginUrlRemovesUserCreds(t *testing.T) { + repo := newTestRepository(t) + repo.addOriginUrl("https://username:token@github.com/databricks/foobar.git") + repo.assertOriginUrl("https://github.com/databricks/foobar.git") +} diff --git a/libs/git/view.go b/libs/git/view.go index 3cb88d8b1..90eed0bb8 100644 --- a/libs/git/view.go +++ b/libs/git/view.go @@ -1,9 +1,13 @@ package git import ( + "fmt" "os" + "path" "path/filepath" "strings" + + "github.com/databricks/cli/libs/vfs" ) // View represents a view on a directory tree that takes into account @@ -29,17 +33,15 @@ type View struct { // Ignore computes whether to ignore the specified path. // The specified path is relative to the view's target path. -func (v *View) Ignore(path string) (bool, error) { - path = filepath.ToSlash(path) - +func (v *View) Ignore(relPath string) (bool, error) { // Retain trailing slash for directory patterns. // Needs special handling because it is removed by path cleaning. trailingSlash := "" - if strings.HasSuffix(path, "/") { + if strings.HasSuffix(relPath, "/") { trailingSlash = "/" } - return v.repo.Ignore(filepath.Join(v.targetPath, path) + trailingSlash) + return v.repo.Ignore(path.Join(v.targetPath, relPath) + trailingSlash) } // IgnoreFile returns if the gitignore rules in this fileset @@ -70,26 +72,27 @@ func (v *View) IgnoreDirectory(dir string) (bool, error) { return v.Ignore(dir + "/") } -func NewView(path string) (*View, error) { - path, err := filepath.Abs(path) - if err != nil { - return nil, err - } - - repo, err := NewRepository(path) +func NewView(root vfs.Path) (*View, error) { + repo, err := NewRepository(root) if err != nil { return nil, err } // Target path must be relative to the repository root path. - targetPath, err := filepath.Rel(repo.rootPath, path) - if err != nil { - return nil, err + target := root.Native() + prefix := repo.root.Native() + if !strings.HasPrefix(target, prefix) { + return nil, fmt.Errorf("path %q is not within repository root %q", root.Native(), prefix) } + // Make target a relative path. 
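One subtlety in the Ignore rewrite above: path.Join cleans its result and therefore drops a trailing slash, but gitignore patterns such as "dist/" only match directories. That is why the trailing slash is captured first and re-appended after the join. A quick standalone illustration:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

func main() {
	target := "a/b"
	rel := "dist/"

	// path.Join cleans its result, so the directory marker is lost.
	fmt.Println(path.Join(target, rel)) // "a/b/dist"

	// Capture and re-append the trailing slash to keep directory semantics.
	trailing := ""
	if strings.HasSuffix(rel, "/") {
		trailing = "/"
	}
	fmt.Println(path.Join(target, rel) + trailing) // "a/b/dist/"
}
```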
+ target = strings.TrimPrefix(target, prefix) + target = strings.TrimPrefix(target, string(os.PathSeparator)) + target = path.Clean(filepath.ToSlash(target)) + return &View{ repo: repo, - targetPath: targetPath, + targetPath: target, }, nil } diff --git a/libs/git/view_test.go b/libs/git/view_test.go index 3ecd301b5..76fba3458 100644 --- a/libs/git/view_test.go +++ b/libs/git/view_test.go @@ -7,6 +7,7 @@ import ( "path/filepath" "testing" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -89,19 +90,19 @@ func testViewAtRoot(t *testing.T, tv testView) { } func TestViewRootInBricksRepo(t *testing.T) { - v, err := NewView("./testdata") + v, err := NewView(vfs.MustNew("./testdata")) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempRepo(t *testing.T) { - v, err := NewView(createFakeRepo(t, "testdata")) + v, err := NewView(vfs.MustNew(createFakeRepo(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } func TestViewRootInTempDir(t *testing.T) { - v, err := NewView(copyTestdata(t, "testdata")) + v, err := NewView(vfs.MustNew(copyTestdata(t, "testdata"))) require.NoError(t, err) testViewAtRoot(t, testView{t, v}) } @@ -124,20 +125,20 @@ func testViewAtA(t *testing.T, tv testView) { } func TestViewAInBricksRepo(t *testing.T) { - v, err := NewView("./testdata/a") + v, err := NewView(vfs.MustNew("./testdata/a")) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempRepo(t *testing.T) { - v, err := NewView(filepath.Join(createFakeRepo(t, "testdata"), "a")) + v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a"))) require.NoError(t, err) testViewAtA(t, testView{t, v}) } func TestViewAInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. - v, err := NewView(filepath.Join(copyTestdata(t, "testdata"), "a")) + v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a"))) require.NoError(t, err) tv := testView{t, v} @@ -174,20 +175,20 @@ func testViewAtAB(t *testing.T, tv testView) { } func TestViewABInBricksRepo(t *testing.T) { - v, err := NewView("./testdata/a/b") + v, err := NewView(vfs.MustNew("./testdata/a/b")) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempRepo(t *testing.T) { - v, err := NewView(filepath.Join(createFakeRepo(t, "testdata"), "a", "b")) + v, err := NewView(vfs.MustNew(filepath.Join(createFakeRepo(t, "testdata"), "a", "b"))) require.NoError(t, err) testViewAtAB(t, testView{t, v}) } func TestViewABInTempDir(t *testing.T) { // Since this is not a fake repo it should not traverse up the tree. 
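For readers following the NewView change: the target path is now derived by trimming the repository root prefix off the native path and normalizing it to a slash-separated, cleaned relative path. A standalone sketch of that conversion; the separator is passed explicitly here so the output is deterministic on any platform, whereas the code above uses filepath.ToSlash against paths native to the OS it runs on:

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// relativeTarget mimics the prefix-trim and normalization done in NewView.
func relativeTarget(root, prefix, sep string) string {
	target := strings.TrimPrefix(root, prefix)
	target = strings.TrimPrefix(target, sep)
	return path.Clean(strings.ReplaceAll(target, sep, "/"))
}

func main() {
	// A view at a subdirectory of a Windows repository root.
	fmt.Println(relativeTarget(`C:\repo\a\b`, `C:\repo`, `\`)) // "a/b"

	// A view at the repository root itself cleans to ".".
	fmt.Println(relativeTarget("/repo", "/repo", "/")) // "."
}
```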
- v, err := NewView(filepath.Join(copyTestdata(t, "testdata"), "a", "b")) + v, err := NewView(vfs.MustNew(filepath.Join(copyTestdata(t, "testdata"), "a", "b"))) tv := testView{t, v} require.NoError(t, err) @@ -214,7 +215,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredAtRoot(t *testing.T) // Since root .gitignore already has .databricks, there should be no edits // to root .gitignore - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -234,7 +235,7 @@ func TestViewDoesNotChangeGitignoreIfCacheDirAlreadyIgnoredInSubdir(t *testing.T // Since root .gitignore already has .databricks, there should be no edits // to a/.gitignore - v, err := NewView(filepath.Join(repoPath, "a")) + v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -252,7 +253,7 @@ func TestViewAddsGitignoreWithCacheDir(t *testing.T) { assert.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to root .gitignore - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -270,7 +271,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { require.NoError(t, err) // Since root .gitignore was deleted, new view adds .databricks to a/.gitignore - v, err := NewView(filepath.Join(repoPath, "a")) + v, err := NewView(vfs.MustNew(filepath.Join(repoPath, "a"))) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() @@ -287,7 +288,7 @@ func TestViewAddsGitignoreWithCacheDirAtSubdir(t *testing.T) { func TestViewAlwaysIgnoresCacheDir(t *testing.T) { repoPath := createFakeRepo(t, "testdata") - v, err := NewView(repoPath) + v, err := NewView(vfs.MustNew(repoPath)) require.NoError(t, err) err = v.EnsureValidGitIgnoreExists() diff --git a/libs/jsonschema/schema.go b/libs/jsonschema/schema.go index 967e2e9cd..f1e223ec7 100644 --- a/libs/jsonschema/schema.go +++ b/libs/jsonschema/schema.go @@ -6,6 +6,7 @@ import ( "os" "regexp" "slices" + "strings" "github.com/databricks/cli/internal/build" "golang.org/x/mod/semver" @@ -81,6 +82,41 @@ func (s *Schema) ParseString(v string) (any, error) { return fromString(v, s.Type) } +func (s *Schema) getByPath(path string) (*Schema, error) { + p := strings.Split(path, ".") + + res := s + for _, node := range p { + if node == "*" { + res = res.AdditionalProperties.(*Schema) + continue + } + var ok bool + res, ok = res.Properties[node] + if !ok { + return nil, fmt.Errorf("property %q not found in schema. Query path: %s", node, path) + } + } + return res, nil +} + +func (s *Schema) GetByPath(path string) (Schema, error) { + v, err := s.getByPath(path) + if err != nil { + return Schema{}, err + } + return *v, nil +} + +func (s *Schema) SetByPath(path string, v Schema) error { + dst, err := s.getByPath(path) + if err != nil { + return err + } + *dst = v + return nil +} + type Type string const ( @@ -97,7 +133,7 @@ const ( func (schema *Schema) validateSchemaPropertyTypes() error { for _, v := range schema.Properties { switch v.Type { - case NumberType, BooleanType, StringType, IntegerType: + case NumberType, BooleanType, StringType, IntegerType, ObjectType, ArrayType: continue case "int", "int32", "int64": return fmt.Errorf("type %s is not a recognized json schema type. 
Please use \"integer\" instead", v.Type) diff --git a/libs/jsonschema/schema_test.go b/libs/jsonschema/schema_test.go index cf1f12767..c365cf235 100644 --- a/libs/jsonschema/schema_test.go +++ b/libs/jsonschema/schema_test.go @@ -4,6 +4,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSchemaValidateTypeNames(t *testing.T) { @@ -305,3 +306,92 @@ func TestValidateSchemaSkippedPropertiesHaveDefaults(t *testing.T) { err = s.validate() assert.NoError(t, err) } + +func testSchema() *Schema { + return &Schema{ + Type: "object", + Properties: map[string]*Schema{ + "int_val": { + Type: "integer", + Default: int64(123), + }, + "string_val": { + Type: "string", + }, + "object_val": { + Type: "object", + Properties: map[string]*Schema{ + "bar": { + Type: "string", + Default: "baz", + }, + }, + AdditionalProperties: &Schema{ + Type: "object", + Properties: map[string]*Schema{ + "foo": { + Type: "string", + Default: "zab", + }, + }, + }, + }, + }, + } + +} + +func TestSchemaGetByPath(t *testing.T) { + s := testSchema() + + ss, err := s.GetByPath("int_val") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: IntegerType, + Default: int64(123), + }, ss) + + ss, err = s.GetByPath("string_val") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + }, ss) + + ss, err = s.GetByPath("object_val.bar") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "baz", + }, ss) + + ss, err = s.GetByPath("object_val.*.foo") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "zab", + }, ss) +} + +func TestSchemaSetByPath(t *testing.T) { + s := testSchema() + + err := s.SetByPath("int_val", Schema{ + Type: IntegerType, + Default: int64(456), + }) + require.NoError(t, err) + assert.Equal(t, int64(456), s.Properties["int_val"].Default) + + err = s.SetByPath("object_val.*.foo", Schema{ + Type: StringType, + Default: "zooby", + }) + require.NoError(t, err) + + ns, err := s.GetByPath("object_val.*.foo") + require.NoError(t, err) + assert.Equal(t, Schema{ + Type: StringType, + Default: "zooby", + }, ns) +} diff --git a/libs/notebook/detect.go b/libs/notebook/detect.go index 17685f3bf..582a88479 100644 --- a/libs/notebook/detect.go +++ b/libs/notebook/detect.go @@ -4,6 +4,7 @@ import ( "bufio" "bytes" "io" + "io/fs" "os" "path/filepath" "strings" @@ -11,44 +12,100 @@ import ( "github.com/databricks/databricks-sdk-go/service/workspace" ) +// FileInfoWithWorkspaceObjectInfo is an interface implemented by [fs.FileInfo] values that +// contain a file's underlying [workspace.ObjectInfo]. +// +// This may be the case when working with a [filer.Filer] backed by the workspace API. +// For these files we do not need to read a file's header to know if it is a notebook; +// we can use the [workspace.ObjectInfo] value directly. +type FileInfoWithWorkspaceObjectInfo interface { + WorkspaceObjectInfo() workspace.ObjectInfo +} + // Maximum length in bytes of the notebook header. const headerLength = 32 -// readHeader reads the first N bytes from a file. -func readHeader(path string) ([]byte, error) { - f, err := os.Open(path) +// file wraps an fs.File and implements a few helper methods such that +// they don't need to be inlined in the [DetectWithFS] function below. 
+type file struct { + f fs.File +} + +func openFile(fsys fs.FS, name string) (*file, error) { + f, err := fsys.Open(name) if err != nil { return nil, err } - defer f.Close() + return &file{f: f}, nil +} +func (f file) close() error { + return f.f.Close() +} + +func (f file) readHeader() (string, error) { // Scan header line with some padding. var buf = make([]byte, headerLength) - n, err := f.Read([]byte(buf)) + n, err := f.f.Read([]byte(buf)) if err != nil && err != io.EOF { - return nil, err + return "", err } // Trim buffer to actual read bytes. - return buf[:n], nil + buf = buf[:n] + + // Read the first line from the buffer. + scanner := bufio.NewScanner(bytes.NewReader(buf)) + scanner.Scan() + return scanner.Text(), nil +} + +// getObjectInfo returns the [workspace.ObjectInfo] for the file if it is +// part of the [fs.FileInfo] value returned by the [fs.Stat] call. +func (f file) getObjectInfo() (oi workspace.ObjectInfo, ok bool, err error) { + stat, err := f.f.Stat() + if err != nil { + return workspace.ObjectInfo{}, false, err + } + + // Use object info if available. + if i, ok := stat.(FileInfoWithWorkspaceObjectInfo); ok { + return i.WorkspaceObjectInfo(), true, nil + } + + return workspace.ObjectInfo{}, false, nil } // Detect returns whether the file at path is a Databricks notebook. // If it is, it returns the notebook language. -func Detect(path string) (notebook bool, language workspace.Language, err error) { +func DetectWithFS(fsys fs.FS, name string) (notebook bool, language workspace.Language, err error) { header := "" - buf, err := readHeader(path) + f, err := openFile(fsys, name) + if err != nil { + return false, "", err + } + + defer f.close() + + // Use object info if available. + oi, ok, err := f.getObjectInfo() + if err != nil { + return false, "", err + } + if ok { + return oi.ObjectType == workspace.ObjectTypeNotebook, oi.Language, nil + } + + // Read the first line of the file. + fileHeader, err := f.readHeader() if err != nil { return false, "", err } - scanner := bufio.NewScanner(bytes.NewReader(buf)) - scanner.Scan() - fileHeader := scanner.Text() // Determine which header to expect based on filename extension. - ext := strings.ToLower(filepath.Ext(path)) + ext := strings.ToLower(filepath.Ext(name)) switch ext { case ".py": header = `# Databricks notebook source` @@ -63,7 +120,7 @@ func Detect(path string) (notebook bool, language workspace.Language, err error) header = "-- Databricks notebook source" language = workspace.LanguageSql case ".ipynb": - return DetectJupyter(path) + return DetectJupyterWithFS(fsys, name) default: return false, "", nil } @@ -74,3 +131,11 @@ func Detect(path string) (notebook bool, language workspace.Language, err error) return true, language, nil } + +// Detect calls DetectWithFS with the local filesystem. +// The name argument may be a local relative path or a local absolute path. 
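Because detection is now keyed off an fs.FS, it can be exercised without touching the local filesystem. A minimal sketch using testing/fstest (file name and contents are made up); a file whose FileInfo does not implement FileInfoWithWorkspaceObjectInfo falls through to the header check, as happens here:

```go
package notebook_test

import (
	"fmt"
	"testing/fstest"

	"github.com/databricks/cli/libs/notebook"
)

func Example_detectWithFS() {
	fsys := fstest.MapFS{
		"my_notebook.py": &fstest.MapFile{
			Data: []byte("# Databricks notebook source\nprint(1)\n"),
		},
	}

	// MapFS file infos carry no workspace object info, so DetectWithFS
	// reads the first line and matches the Python notebook header.
	nb, lang, err := notebook.DetectWithFS(fsys, "my_notebook.py")
	fmt.Println(nb, lang, err)
	// Output: true PYTHON <nil>
}
```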
+func Detect(name string) (notebook bool, language workspace.Language, err error) { + d := filepath.ToSlash(filepath.Dir(name)) + b := filepath.Base(name) + return DetectWithFS(os.DirFS(d), b) +} diff --git a/libs/notebook/detect_jupyter.go b/libs/notebook/detect_jupyter.go index 7d96763cd..f631b5812 100644 --- a/libs/notebook/detect_jupyter.go +++ b/libs/notebook/detect_jupyter.go @@ -3,7 +3,9 @@ package notebook import ( "encoding/json" "fmt" + "io/fs" "os" + "path/filepath" "github.com/databricks/databricks-sdk-go/service/workspace" ) @@ -56,8 +58,8 @@ func resolveLanguage(nb *jupyter) workspace.Language { // DetectJupyter returns whether the file at path is a valid Jupyter notebook. // We assume it is valid if we can read it as JSON and see a couple expected fields. // If we cannot, importing into the workspace will always fail, so we also return an error. -func DetectJupyter(path string) (notebook bool, language workspace.Language, err error) { - f, err := os.Open(path) +func DetectJupyterWithFS(fsys fs.FS, name string) (notebook bool, language workspace.Language, err error) { + f, err := fsys.Open(name) if err != nil { return false, "", err } @@ -68,18 +70,26 @@ func DetectJupyter(path string) (notebook bool, language workspace.Language, err dec := json.NewDecoder(f) err = dec.Decode(&nb) if err != nil { - return false, "", fmt.Errorf("%s: error loading Jupyter notebook file: %w", path, err) + return false, "", fmt.Errorf("%s: error loading Jupyter notebook file: %w", name, err) } // Not a Jupyter notebook if the cells or metadata fields aren't defined. if nb.Cells == nil || nb.Metadata == nil { - return false, "", fmt.Errorf("%s: invalid Jupyter notebook file", path) + return false, "", fmt.Errorf("%s: invalid Jupyter notebook file", name) } // Major version must be at least 4. if nb.NbFormatMajor < 4 { - return false, "", fmt.Errorf("%s: unsupported Jupyter notebook version: %d", path, nb.NbFormatMajor) + return false, "", fmt.Errorf("%s: unsupported Jupyter notebook version: %d", name, nb.NbFormatMajor) } return true, resolveLanguage(&nb), nil } + +// DetectJupyter calls DetectJupyterWithFS with the local filesystem. +// The name argument may be a local relative path or a local absolute path. 
+func DetectJupyter(name string) (notebook bool, language workspace.Language, err error) { + d := filepath.ToSlash(filepath.Dir(name)) + b := filepath.Base(name) + return DetectJupyterWithFS(os.DirFS(d), b) +} diff --git a/libs/notebook/detect_test.go b/libs/notebook/detect_test.go index 5d3aa8a81..ad89d6dd5 100644 --- a/libs/notebook/detect_test.go +++ b/libs/notebook/detect_test.go @@ -1,6 +1,8 @@ package notebook import ( + "errors" + "io/fs" "os" "path/filepath" "testing" @@ -50,7 +52,7 @@ func TestDetectCallsDetectJupyter(t *testing.T) { func TestDetectUnknownExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist.foobar") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/unknown_extension.foobar") require.NoError(t, err) @@ -59,7 +61,7 @@ func TestDetectUnknownExtension(t *testing.T) { func TestDetectNoExtension(t *testing.T) { _, _, err := Detect("./testdata/doesntexist") - assert.True(t, os.IsNotExist(err)) + assert.True(t, errors.Is(err, fs.ErrNotExist)) nb, _, err := Detect("./testdata/no_extension") require.NoError(t, err) @@ -97,3 +99,21 @@ func TestDetectFileWithLongHeader(t *testing.T) { require.NoError(t, err) assert.False(t, nb) } + +func TestDetectWithObjectInfo(t *testing.T) { + fakeFS := &fakeFS{ + fakeFile{ + fakeFileInfo{ + workspace.ObjectInfo{ + ObjectType: workspace.ObjectTypeNotebook, + Language: workspace.LanguagePython, + }, + }, + }, + } + + nb, lang, err := DetectWithFS(fakeFS, "doesntmatter") + require.NoError(t, err) + assert.True(t, nb) + assert.Equal(t, workspace.LanguagePython, lang) +} diff --git a/libs/notebook/fakefs_test.go b/libs/notebook/fakefs_test.go new file mode 100644 index 000000000..4ac135dd4 --- /dev/null +++ b/libs/notebook/fakefs_test.go @@ -0,0 +1,77 @@ +package notebook + +import ( + "fmt" + "io/fs" + "time" + + "github.com/databricks/databricks-sdk-go/service/workspace" +) + +type fakeFS struct { + fakeFile +} + +type fakeFile struct { + fakeFileInfo +} + +func (f fakeFile) Close() error { + return nil +} + +func (f fakeFile) Read(p []byte) (n int, err error) { + return 0, fmt.Errorf("not implemented") +} + +func (f fakeFile) Stat() (fs.FileInfo, error) { + return f.fakeFileInfo, nil +} + +type fakeFileInfo struct { + oi workspace.ObjectInfo +} + +func (f fakeFileInfo) WorkspaceObjectInfo() workspace.ObjectInfo { + return f.oi +} + +func (f fakeFileInfo) Name() string { + return "" +} + +func (f fakeFileInfo) Size() int64 { + return 0 +} + +func (f fakeFileInfo) Mode() fs.FileMode { + return 0 +} + +func (f fakeFileInfo) ModTime() time.Time { + return time.Time{} +} + +func (f fakeFileInfo) IsDir() bool { + return false +} + +func (f fakeFileInfo) Sys() any { + return nil +} + +func (f fakeFS) Open(name string) (fs.File, error) { + return f.fakeFile, nil +} + +func (f fakeFS) Stat(name string) (fs.FileInfo, error) { + panic("not implemented") +} + +func (f fakeFS) ReadDir(name string) ([]fs.DirEntry, error) { + panic("not implemented") +} + +func (f fakeFS) ReadFile(name string) ([]byte, error) { + panic("not implemented") +} diff --git a/libs/process/background_test.go b/libs/process/background_test.go index 5bf2400bc..2e47e814b 100644 --- a/libs/process/background_test.go +++ b/libs/process/background_test.go @@ -1,10 +1,10 @@ package process import ( + "bufio" "bytes" "context" "fmt" - "os" "os/exec" "strings" "testing" @@ -12,10 +12,21 @@ import ( "github.com/stretchr/testify/assert" ) +func splitLines(b []byte) (lines []string) { + scan := 
bufio.NewScanner(bytes.NewReader(b)) + for scan.Scan() { + line := scan.Text() + if line != "" { + lines = append(lines, line) + } + } + return lines +} + func TestBackgroundUnwrapsNotFound(t *testing.T) { ctx := context.Background() - _, err := Background(ctx, []string{"/bin/meeecho", "1"}) - assert.ErrorIs(t, err, os.ErrNotExist) + _, err := Background(ctx, []string{"meeecho", "1"}) + assert.ErrorIs(t, err, exec.ErrNotFound) } func TestBackground(t *testing.T) { @@ -46,7 +57,12 @@ func TestBackgroundCombinedOutput(t *testing.T) { }, WithCombinedOutput(&buf)) assert.NoError(t, err) assert.Equal(t, "2", strings.TrimSpace(res)) - assert.Equal(t, "1\n2\n", strings.ReplaceAll(buf.String(), "\r", "")) + + // The order of stdout and stderr being read into the buffer + // for combined output is not deterministic due to scheduling + // of the underlying goroutines that consume them. + // That's why this asserts on the contents and not the order. + assert.ElementsMatch(t, []string{"1", "2"}, splitLines(buf.Bytes())) } func TestBackgroundCombinedOutputFailure(t *testing.T) { @@ -66,10 +82,7 @@ func TestBackgroundCombinedOutputFailure(t *testing.T) { assert.Equal(t, "2", strings.TrimSpace(processErr.Stdout)) } assert.Equal(t, "2", strings.TrimSpace(res)) - - out := strings.ReplaceAll(buf.String(), "\r", "") - assert.Contains(t, out, "1\n") - assert.Contains(t, out, "2\n") + assert.ElementsMatch(t, []string{"1", "2"}, splitLines(buf.Bytes())) } func TestBackgroundNoStdin(t *testing.T) { diff --git a/libs/process/opts.go b/libs/process/opts.go index e201c6668..9516e49ba 100644 --- a/libs/process/opts.go +++ b/libs/process/opts.go @@ -48,6 +48,27 @@ func WithStdoutPipe(dst *io.ReadCloser) execOption { } } +func WithStdinReader(src io.Reader) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdin = src + return nil + } +} + +func WithStderrWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stderr = dst + return nil + } +} + +func WithStdoutWriter(dst io.Writer) execOption { + return func(_ context.Context, c *exec.Cmd) error { + c.Stdout = dst + return nil + } +} + func WithCombinedOutput(buf *bytes.Buffer) execOption { return func(_ context.Context, c *exec.Cmd) error { c.Stdout = io.MultiWriter(buf, c.Stdout) diff --git a/libs/sync/diff.go b/libs/sync/diff.go index 074bfc56c..e91f7277e 100644 --- a/libs/sync/diff.go +++ b/libs/sync/diff.go @@ -2,7 +2,6 @@ package sync import ( "path" - "path/filepath" "golang.org/x/exp/maps" ) @@ -64,7 +63,7 @@ func (d *diff) addFilesWithRemoteNameChanged(after *SnapshotState, before *Snaps func (d *diff) addNewFiles(after *SnapshotState, before *SnapshotState) { for localName := range after.LastModifiedTimes { if _, ok := before.LastModifiedTimes[localName]; !ok { - d.put = append(d.put, filepath.ToSlash(localName)) + d.put = append(d.put, localName) } } @@ -79,7 +78,7 @@ func (d *diff) addUpdatedFiles(after *SnapshotState, before *SnapshotState) { for localName, modTime := range after.LastModifiedTimes { prevModTime, ok := before.LastModifiedTimes[localName] if ok && modTime.After(prevModTime) { - d.put = append(d.put, filepath.ToSlash(localName)) + d.put = append(d.put, localName) } } } diff --git a/libs/sync/dirset.go b/libs/sync/dirset.go index 3c37c97cf..33b85cb8e 100644 --- a/libs/sync/dirset.go +++ b/libs/sync/dirset.go @@ -2,7 +2,6 @@ package sync import ( "path" - "path/filepath" "sort" ) @@ -16,8 +15,8 @@ func MakeDirSet(files []string) DirSet { // Iterate over all files. 
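The new WithStdinReader, WithStdoutWriter, and WithStderrWriter helpers follow the same functional-option shape as the existing options: each one is a function that configures the exec.Cmd before it starts. A generic, self-contained sketch of that pattern (the names below are illustrative, not the package's exported API):

```go
package main

import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"strings"
)

// option mirrors the execOption shape: a hook that mutates the command.
type option func(ctx context.Context, c *exec.Cmd) error

func withStdinReader(src string) option {
	return func(_ context.Context, c *exec.Cmd) error {
		c.Stdin = strings.NewReader(src)
		return nil
	}
}

func withStdoutWriter(dst *bytes.Buffer) option {
	return func(_ context.Context, c *exec.Cmd) error {
		c.Stdout = dst
		return nil
	}
}

func run(ctx context.Context, args []string, opts ...option) error {
	cmd := exec.CommandContext(ctx, args[0], args[1:]...)
	for _, opt := range opts {
		if err := opt(ctx, cmd); err != nil {
			return err
		}
	}
	return cmd.Run()
}

func main() {
	var out bytes.Buffer
	// "cat" echoes stdin back to stdout; assumes a POSIX environment.
	err := run(context.Background(), []string{"cat"},
		withStdinReader("hello\n"), withStdoutWriter(&out))
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Print(out.String()) // hello
}
```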
for _, f := range files { - // Get the directory of the file in /-separated form. - dir := filepath.ToSlash(filepath.Dir(f)) + // Get the directory of the file. + dir := path.Dir(f) // Add this directory and its parents until it is either "." or already in the set. for dir != "." { diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index f9956962e..f2920d8c2 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -3,7 +3,9 @@ package sync import ( "context" "encoding/json" + "errors" "fmt" + "io/fs" "os" "path/filepath" "time" @@ -33,7 +35,7 @@ const LatestSnapshotVersion = "v1" type Snapshot struct { // Path where this snapshot was loaded from and will be saved to. // Intentionally not part of the snapshot state because it may be moved by the user. - SnapshotPath string `json:"-"` + snapshotPath string // New indicates if this is a fresh snapshot or if it was loaded from disk. New bool `json:"-"` @@ -53,6 +55,30 @@ type Snapshot struct { const syncSnapshotDirName = "sync-snapshots" +func NewSnapshot(localFiles []fileset.File, opts *SyncOptions) (*Snapshot, error) { + snapshotPath, err := SnapshotPath(opts) + if err != nil { + return nil, err + } + + snapshotState, err := NewSnapshotState(localFiles) + if err != nil { + return nil, err + } + + // Reset last modified times to make sure all files are synced + snapshotState.ResetLastModifiedTimes() + + return &Snapshot{ + snapshotPath: snapshotPath, + New: true, + Version: LatestSnapshotVersion, + Host: opts.Host, + RemotePath: opts.RemotePath, + SnapshotState: snapshotState, + }, nil +} + func GetFileName(host, remotePath string) string { hash := md5.Sum([]byte(host + remotePath)) hashString := hex.EncodeToString(hash[:]) @@ -64,7 +90,7 @@ func GetFileName(host, remotePath string) string { // precisely it's the first 16 characters of md5(concat(host, remotePath)) func SnapshotPath(opts *SyncOptions) (string, error) { snapshotDir := filepath.Join(opts.SnapshotBasePath, syncSnapshotDirName) - if _, err := os.Stat(snapshotDir); os.IsNotExist(err) { + if _, err := os.Stat(snapshotDir); errors.Is(err, fs.ErrNotExist) { err = os.MkdirAll(snapshotDir, 0755) if err != nil { return "", fmt.Errorf("failed to create config directory: %s", err) @@ -81,7 +107,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } return &Snapshot{ - SnapshotPath: path, + snapshotPath: path, New: true, Version: LatestSnapshotVersion, @@ -96,7 +122,7 @@ func newSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) Save(ctx context.Context) error { - f, err := os.OpenFile(s.SnapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) + f, err := os.OpenFile(s.snapshotPath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0644) if err != nil { return fmt.Errorf("failed to create/open persisted sync snapshot file: %s", err) } @@ -114,14 +140,6 @@ func (s *Snapshot) Save(ctx context.Context) error { return nil } -func (s *Snapshot) Destroy(ctx context.Context) error { - err := os.Remove(s.SnapshotPath) - if err != nil && !os.IsNotExist(err) { - return fmt.Errorf("failed to destroy sync snapshot file: %s", err) - } - return nil -} - func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error) { snapshot, err := newSnapshot(ctx, opts) if err != nil { @@ -129,11 +147,11 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error } // Snapshot file not found. We return the new copy. 
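NewSnapshot resets every recorded modification time to the Unix epoch, so the first diff computed against it treats all tracked files as updated and uploads them. A tiny illustration of why that works, given that a file is marked for upload when its current mtime is after the recorded one (a sketch, not the sync package's own code):

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	recorded := map[string]time.Time{
		"notebook.py": time.Now(),
	}

	// Resetting to the epoch guarantees any real mtime compares as newer.
	for k := range recorded {
		recorded[k] = time.Unix(0, 0)
	}

	current := time.Now()
	fmt.Println(current.After(recorded["notebook.py"])) // true: file is re-uploaded
}
```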
- if _, err := os.Stat(snapshot.SnapshotPath); os.IsNotExist(err) { + if _, err := os.Stat(snapshot.snapshotPath); errors.Is(err, fs.ErrNotExist) { return snapshot, nil } - bytes, err := os.ReadFile(snapshot.SnapshotPath) + bytes, err := os.ReadFile(snapshot.snapshotPath) if err != nil { return nil, fmt.Errorf("failed to read sync snapshot from disk: %s", err) } @@ -156,6 +174,11 @@ func loadOrNewSnapshot(ctx context.Context, opts *SyncOptions) (*Snapshot, error return nil, fmt.Errorf("failed to json unmarshal persisted snapshot: %s", err) } + // Ensure that all paths are slash-separated upon loading + // an existing snapshot file. If it was created by an older + // CLI version (<= v0.220.0), it may contain backslashes. + snapshot.SnapshotState = snapshot.SnapshotState.ToSlash() + snapshot.New = false return snapshot, nil } @@ -168,7 +191,7 @@ func (s *Snapshot) diff(ctx context.Context, all []fileset.File) (diff, error) { currentState := s.SnapshotState if err := currentState.validate(); err != nil { - return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.SnapshotPath, err) + return diff{}, fmt.Errorf("error parsing existing sync state. Please delete your existing sync snapshot file (%s) and retry: %w", s.snapshotPath, err) } // Compute diff to apply to get from current state to new target state. diff --git a/libs/sync/snapshot_state.go b/libs/sync/snapshot_state.go index 575063521..09bb5b63e 100644 --- a/libs/sync/snapshot_state.go +++ b/libs/sync/snapshot_state.go @@ -2,12 +2,12 @@ package sync import ( "fmt" + "path" "path/filepath" "strings" "time" "github.com/databricks/cli/libs/fileset" - "github.com/databricks/cli/libs/notebook" ) // SnapshotState keeps track of files on the local filesystem and their corresponding @@ -46,17 +46,19 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { } // Compute the new state. - for _, f := range localFiles { + for k := range localFiles { + f := &localFiles[k] // Compute the remote name the file will have in WSFS - remoteName := filepath.ToSlash(f.Relative) - isNotebook, _, err := notebook.Detect(f.Absolute) + remoteName := f.Relative + isNotebook, err := f.IsNotebook() + if err != nil { // Ignore this file if we're unable to determine the notebook type. // Trying to upload such a file to the workspace would fail anyway. continue } if isNotebook { - ext := filepath.Ext(remoteName) + ext := path.Ext(remoteName) remoteName = strings.TrimSuffix(remoteName, ext) } @@ -72,6 +74,12 @@ func NewSnapshotState(localFiles []fileset.File) (*SnapshotState, error) { return fs, nil } +func (fs *SnapshotState) ResetLastModifiedTimes() { + for k := range fs.LastModifiedTimes { + fs.LastModifiedTimes[k] = time.Unix(0, 0) + } +} + // Consistency checks for the sync files state representation. These are invariants // that downstream code for computing changes to apply to WSFS depends on. // @@ -112,3 +120,30 @@ func (fs *SnapshotState) validate() error { } return nil } + +// ToSlash ensures all local paths in the snapshot state +// are slash-separated. Returns a new snapshot state. +func (old SnapshotState) ToSlash() *SnapshotState { + new := SnapshotState{ + LastModifiedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + } + + // Keys are local paths. + for k, v := range old.LastModifiedTimes { + new.LastModifiedTimes[filepath.ToSlash(k)] = v + } + + // Keys are local paths. 
+ for k, v := range old.LocalToRemoteNames { + new.LocalToRemoteNames[filepath.ToSlash(k)] = v + } + + // Values are remote paths. + for k, v := range old.RemoteToLocalNames { + new.RemoteToLocalNames[k] = filepath.ToSlash(v) + } + + return &new +} diff --git a/libs/sync/snapshot_state_test.go b/libs/sync/snapshot_state_test.go index bfcdbef65..92c14e8e0 100644 --- a/libs/sync/snapshot_state_test.go +++ b/libs/sync/snapshot_state_test.go @@ -1,25 +1,27 @@ package sync import ( + "runtime" "testing" "time" "github.com/databricks/cli/libs/fileset" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) func TestSnapshotState(t *testing.T) { - fileSet := fileset.New("./testdata/sync-fileset") + fileSet := fileset.New(vfs.MustNew("./testdata/sync-fileset")) files, err := fileSet.All() require.NoError(t, err) // Assert initial contents of the fileset assert.Len(t, files, 4) - assert.Equal(t, "invalid-nb.ipynb", files[0].Name()) - assert.Equal(t, "my-nb.py", files[1].Name()) - assert.Equal(t, "my-script.py", files[2].Name()) - assert.Equal(t, "valid-nb.ipynb", files[3].Name()) + assert.Equal(t, "invalid-nb.ipynb", files[0].Relative) + assert.Equal(t, "my-nb.py", files[1].Relative) + assert.Equal(t, "my-script.py", files[2].Relative) + assert.Equal(t, "valid-nb.ipynb", files[3].Relative) // Assert snapshot state generated from the fileset. Note that the invalid notebook // has been ignored. @@ -114,3 +116,30 @@ func TestSnapshotStateValidationErrors(t *testing.T) { } assert.EqualError(t, s.validate(), "invalid sync state representation. Inconsistent values found. Remote file c points to a. Local file a points to b") } + +func TestSnapshotStateWithBackslashes(t *testing.T) { + if runtime.GOOS != "windows" { + t.Skip("Skipping test on non-Windows platform") + } + + now := time.Now() + s1 := &SnapshotState{ + LastModifiedTimes: map[string]time.Time{ + "foo\\bar.py": now, + }, + LocalToRemoteNames: map[string]string{ + "foo\\bar.py": "foo/bar", + }, + RemoteToLocalNames: map[string]string{ + "foo/bar": "foo\\bar.py", + }, + } + + assert.NoError(t, s1.validate()) + + s2 := s1.ToSlash() + assert.NoError(t, s1.validate()) + assert.Equal(t, map[string]time.Time{"foo/bar.py": now}, s2.LastModifiedTimes) + assert.Equal(t, map[string]string{"foo/bar.py": "foo/bar"}, s2.LocalToRemoteNames) + assert.Equal(t, map[string]string{"foo/bar": "foo/bar.py"}, s2.RemoteToLocalNames) +} diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index d6358d4a1..050b5d965 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -10,6 +10,7 @@ import ( "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/testfile" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -29,7 +30,7 @@ func TestDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -93,7 +94,7 @@ func TestSymlinkDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -124,7 +125,7 @@ func TestFolderDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := 
git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -169,7 +170,7 @@ func TestPythonNotebookDiff(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -244,7 +245,7 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ @@ -281,7 +282,7 @@ func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { // Create temp project dir projectDir := t.TempDir() - fileSet, err := git.NewFileSet(projectDir) + fileSet, err := git.NewFileSet(vfs.MustNew(projectDir)) require.NoError(t, err) state := Snapshot{ SnapshotState: &SnapshotState{ diff --git a/libs/sync/sync.go b/libs/sync/sync.go index beb3f6a33..3d5bc61ec 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -10,12 +10,13 @@ import ( "github.com/databricks/cli/libs/git" "github.com/databricks/cli/libs/log" "github.com/databricks/cli/libs/set" + "github.com/databricks/cli/libs/vfs" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/iam" ) type SyncOptions struct { - LocalPath string + LocalPath vfs.Path RemotePath string Include []string Exclude []string @@ -54,6 +55,7 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) { if err != nil { return nil, err } + err = fileSet.EnsureValidGitIgnoreExists() if err != nil { return nil, err @@ -150,43 +152,48 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { s.seq++ } -func (s *Sync) RunOnce(ctx context.Context) error { - files, err := getFileList(ctx, s) +// Upload all files in the file tree rooted at the local path configured in the +// SyncOptions to the remote path configured in the SyncOptions. +// +// Returns the list of files tracked (and synchronized) by the syncer during the run, +// and an error if any occurred. 
+func (s *Sync) RunOnce(ctx context.Context) ([]fileset.File, error) { + files, err := s.GetFileList(ctx) if err != nil { - return err + return files, err } change, err := s.snapshot.diff(ctx, files) if err != nil { - return err + return files, err } s.notifyStart(ctx, change) if change.IsEmpty() { s.notifyComplete(ctx, change) - return nil + return files, nil } err = s.applyDiff(ctx, change) if err != nil { - return err + return files, err } err = s.snapshot.Save(ctx) if err != nil { log.Errorf(ctx, "cannot store snapshot: %s", err) - return err + return files, err } s.notifyComplete(ctx, change) - return nil + return files, nil } -func getFileList(ctx context.Context, s *Sync) ([]fileset.File, error) { +func (s *Sync) GetFileList(ctx context.Context) ([]fileset.File, error) { // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 all := set.NewSetF(func(f fileset.File) string { - return f.Absolute + return f.Relative }) gitFiles, err := s.fileSet.All() if err != nil { @@ -216,14 +223,6 @@ func getFileList(ctx context.Context, s *Sync) ([]fileset.File, error) { return all.Iter(), nil } -func (s *Sync) DestroySnapshot(ctx context.Context) error { - return s.snapshot.Destroy(ctx) -} - -func (s *Sync) SnapshotPath() string { - return s.snapshot.SnapshotPath -} - func (s *Sync) RunContinuous(ctx context.Context) error { ticker := time.NewTicker(s.PollInterval) defer ticker.Stop() @@ -233,7 +232,7 @@ func (s *Sync) RunContinuous(ctx context.Context) error { case <-ctx.Done(): return ctx.Err() case <-ticker.C: - err := s.RunOnce(ctx) + _, err := s.RunOnce(ctx) if err != nil { return err } diff --git a/libs/sync/sync_test.go b/libs/sync/sync_test.go index 0f1ad61ba..292586e8d 100644 --- a/libs/sync/sync_test.go +++ b/libs/sync/sync_test.go @@ -8,6 +8,7 @@ import ( "github.com/databricks/cli/libs/fileset" "github.com/databricks/cli/libs/git" + "github.com/databricks/cli/libs/vfs" "github.com/stretchr/testify/require" ) @@ -73,16 +74,17 @@ func TestGetFileSet(t *testing.T) { ctx := context.Background() dir := setupFiles(t) - fileSet, err := git.NewFileSet(dir) + root := vfs.MustNew(dir) + fileSet, err := git.NewFileSet(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() require.NoError(t, err) - inc, err := fileset.NewGlobSet(dir, []string{}) + inc, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err := fileset.NewGlobSet(dir, []string{}) + excl, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) s := &Sync{ @@ -93,14 +95,14 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err := getFileList(ctx, s) + fileList, err := s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 9) - inc, err = fileset.NewGlobSet(dir, []string{}) + inc, err = fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err = fileset.NewGlobSet(dir, []string{"*.go"}) + excl, err = fileset.NewGlobSet(root, []string{"*.go"}) require.NoError(t, err) s = &Sync{ @@ -111,14 +113,14 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err = getFileList(ctx, s) + fileList, err = s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 1) - inc, err = fileset.NewGlobSet(dir, []string{".databricks/*"}) + inc, err = fileset.NewGlobSet(root, []string{".databricks/*"}) require.NoError(t, err) - excl, err = fileset.NewGlobSet(dir, 
[]string{}) + excl, err = fileset.NewGlobSet(root, []string{}) require.NoError(t, err) s = &Sync{ @@ -129,7 +131,7 @@ func TestGetFileSet(t *testing.T) { excludeFileSet: excl, } - fileList, err = getFileList(ctx, s) + fileList, err = s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 10) } @@ -138,16 +140,17 @@ func TestRecursiveExclude(t *testing.T) { ctx := context.Background() dir := setupFiles(t) - fileSet, err := git.NewFileSet(dir) + root := vfs.MustNew(dir) + fileSet, err := git.NewFileSet(root) require.NoError(t, err) err = fileSet.EnsureValidGitIgnoreExists() require.NoError(t, err) - inc, err := fileset.NewGlobSet(dir, []string{}) + inc, err := fileset.NewGlobSet(root, []string{}) require.NoError(t, err) - excl, err := fileset.NewGlobSet(dir, []string{"test/**"}) + excl, err := fileset.NewGlobSet(root, []string{"test/**"}) require.NoError(t, err) s := &Sync{ @@ -158,7 +161,7 @@ func TestRecursiveExclude(t *testing.T) { excludeFileSet: excl, } - fileList, err := getFileList(ctx, s) + fileList, err := s.GetFileList(ctx) require.NoError(t, err) require.Equal(t, len(fileList), 7) } diff --git a/libs/sync/watchdog.go b/libs/sync/watchdog.go index b0c96e01c..ca7ec46e9 100644 --- a/libs/sync/watchdog.go +++ b/libs/sync/watchdog.go @@ -4,8 +4,6 @@ import ( "context" "errors" "io/fs" - "os" - "path/filepath" "github.com/databricks/cli/libs/filer" "github.com/databricks/cli/libs/log" @@ -59,7 +57,7 @@ func (s *Sync) applyMkdir(ctx context.Context, localName string) error { func (s *Sync) applyPut(ctx context.Context, localName string) error { s.notifyProgress(ctx, EventActionPut, localName, 0.0) - localFile, err := os.Open(filepath.Join(s.LocalPath, localName)) + localFile, err := s.LocalPath.Open(localName) if err != nil { return err } diff --git a/libs/template/config.go b/libs/template/config.go index 5dd038e01..5470aefeb 100644 --- a/libs/template/config.go +++ b/libs/template/config.go @@ -70,8 +70,14 @@ func validateSchema(schema *jsonschema.Schema) error { // Reads json file at path and assigns values from the file func (c *config) assignValuesFromFile(path string) error { - // Load the config file. + // It's valid to set additional properties in the config file that are not + // defined in the schema. They will be filtered below. Thus for the duration of + // the LoadInstance call, we disable the additional properties check, + // to allow those properties to be loaded. + c.schema.AdditionalProperties = true configFromFile, err := c.schema.LoadInstance(path) + c.schema.AdditionalProperties = false + if err != nil { return fmt.Errorf("failed to load config from file %s: %w", path, err) } @@ -79,6 +85,11 @@ func (c *config) assignValuesFromFile(path string) error { // Write configs from the file to the input map, not overwriting any existing // configurations. for name, val := range configFromFile { + // If a property is not defined in the schema, skip it. + if _, ok := c.schema.Properties[name]; !ok { + continue + } + // If a value is already assigned, keep the original value. 
if _, ok := c.values[name]; ok { continue } @@ -89,7 +100,10 @@ func (c *config) assignValuesFromFile(path string) error { // Assigns default values from schema to input config map func (c *config) assignDefaultValues(r *renderer) error { - for name, property := range c.schema.Properties { + for _, p := range c.schema.OrderedProperties() { + name := p.Name + property := p.Schema + // Config already has a value assigned if _, ok := c.values[name]; ok { continue diff --git a/libs/template/config_test.go b/libs/template/config_test.go index 847c2615b..1af2e5f5a 100644 --- a/libs/template/config_test.go +++ b/libs/template/config_test.go @@ -52,6 +52,17 @@ func TestTemplateConfigAssignValuesFromFileDoesNotOverwriteExistingConfigs(t *te assert.Equal(t, "this-is-not-overwritten", c.values["string_val"]) } +func TestTemplateConfigAssignValuesFromFileFiltersPropertiesNotInTheSchema(t *testing.T) { + c := testConfig(t) + + err := c.assignValuesFromFile("./testdata/config-assign-from-file-unknown-property/config.json") + assert.NoError(t, err) + + // assert only the known property is loaded + assert.Len(t, c.values, 1) + assert.Equal(t, "i am a known property", c.values["string_val"]) +} + func TestTemplateConfigAssignDefaultValues(t *testing.T) { c := testConfig(t) diff --git a/libs/template/helpers.go b/libs/template/helpers.go index 537fadb1e..b3dea329e 100644 --- a/libs/template/helpers.go +++ b/libs/template/helpers.go @@ -4,6 +4,7 @@ import ( "context" "errors" "fmt" + "math/rand" "net/url" "os" "regexp" @@ -11,6 +12,7 @@ import ( "github.com/databricks/cli/cmd/root" "github.com/databricks/cli/libs/auth" + "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/service/iam" ) @@ -29,6 +31,7 @@ type pair struct { var cachedUser *iam.User var cachedIsServicePrincipal *bool +var cachedCatalog *string func loadHelpers(ctx context.Context) template.FuncMap { w := root.WorkspaceClient(ctx) @@ -44,6 +47,10 @@ func loadHelpers(ctx context.Context) template.FuncMap { "regexp": func(expr string) (*regexp.Regexp, error) { return regexp.Compile(expr) }, + // Alias for https://pkg.go.dev/math/rand#Intn. Returns, as an int, a non-negative pseudo-random number in the half-open interval [0,n). + "random_int": func(n int) int { + return rand.Intn(n) + }, // A key value pair. This is used with the map function to generate maps // to use inside a template "pair": func(k string, v any) pair { @@ -108,6 +115,25 @@ func loadHelpers(ctx context.Context) template.FuncMap { } return auth.GetShortUserName(cachedUser.UserName), nil }, + // Get the default workspace catalog. If there is no default, or if + // Unity Catalog is not enabled, return an empty string. 
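For context on the new random_int helper: in Go's text/template, a FuncMap entry like this is called directly from the template body. A minimal standalone sketch registering an equivalent function under the same name (an illustration, not the CLI's loadHelpers wiring):

```go
package main

import (
	"math/rand"
	"os"
	"text/template"
)

func main() {
	tmpl := template.Must(template.New("example").Funcs(template.FuncMap{
		// Same shape as the helper above: a non-negative int in [0, n).
		"random_int": func(n int) int { return rand.Intn(n) },
	}).Parse("suffix-{{random_int 10}}\n"))

	// Renders something like "suffix-7".
	_ = tmpl.Execute(os.Stdout, nil)
}
```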
+ "default_catalog": func() (string, error) { + if cachedCatalog == nil { + metastore, err := w.Metastores.Current(ctx) + if err != nil { + var aerr *apierr.APIError + if errors.As(err, &aerr) && aerr.ErrorCode == "METASTORE_DOES_NOT_EXIST" { + // Workspace doesn't have a metastore assigned, ignore error + empty_default := "" + cachedCatalog = &empty_default + return "", nil + } + return "", err + } + cachedCatalog = &metastore.DefaultCatalogName + } + return *cachedCatalog, nil + }, "is_service_principal": func() (bool, error) { if cachedIsServicePrincipal != nil { return *cachedIsServicePrincipal, nil @@ -119,7 +145,7 @@ func loadHelpers(ctx context.Context) template.FuncMap { return false, err } } - result := auth.IsServicePrincipal(cachedUser.Id) + result := auth.IsServicePrincipal(cachedUser.UserName) cachedIsServicePrincipal = &result return result, nil }, diff --git a/libs/template/helpers_test.go b/libs/template/helpers_test.go index d495ae895..c0848c8d0 100644 --- a/libs/template/helpers_test.go +++ b/libs/template/helpers_test.go @@ -3,6 +3,7 @@ package template import ( "context" "os" + "strconv" "strings" "testing" @@ -50,6 +51,24 @@ func TestTemplateRegexpCompileFunction(t *testing.T) { assert.Contains(t, content, "1:fool") } +func TestTemplateRandIntFunction(t *testing.T) { + ctx := context.Background() + tmpDir := t.TempDir() + + ctx = root.SetWorkspaceClient(ctx, nil) + helpers := loadHelpers(ctx) + r, err := newRenderer(ctx, nil, helpers, "./testdata/random-int/template", "./testdata/random-int/library", tmpDir) + require.NoError(t, err) + + err = r.walk() + assert.NoError(t, err) + + assert.Len(t, r.files, 1) + randInt, err := strconv.Atoi(strings.TrimSpace(string(r.files[0].(*inMemoryFile).content))) + assert.Less(t, randInt, 10) + assert.Empty(t, err) +} + func TestTemplateUrlFunction(t *testing.T) { ctx := context.Background() tmpDir := t.TempDir() @@ -111,7 +130,7 @@ func TestWorkspaceHost(t *testing.T) { func TestWorkspaceHostNotConfigured(t *testing.T) { ctx := context.Background() - cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "template") + cmd := cmdio.NewIO(flags.OutputJSON, strings.NewReader(""), os.Stdout, os.Stderr, "", "template") ctx = cmdio.InContext(ctx, cmd) tmpDir := t.TempDir() diff --git a/libs/template/materialize.go b/libs/template/materialize.go index 811ef9259..d824bf381 100644 --- a/libs/template/materialize.go +++ b/libs/template/materialize.go @@ -3,6 +3,7 @@ package template import ( "context" "embed" + "errors" "fmt" "io/fs" "os" @@ -44,7 +45,7 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st schemaPath := filepath.Join(templateRoot, schemaFileName) helpers := loadHelpers(ctx) - if _, err := os.Stat(schemaPath); os.IsNotExist(err) { + if _, err := os.Stat(schemaPath); errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("not a bundle template: expected to find a template schema file at %s", schemaPath) } @@ -53,12 +54,6 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st return err } - // Print welcome message - welcome := config.schema.WelcomeMessage - if welcome != "" { - cmdio.LogString(ctx, welcome) - } - // Read and assign config values from file if configFilePath != "" { err = config.assignValuesFromFile(configFilePath) @@ -72,6 +67,16 @@ func Materialize(ctx context.Context, configFilePath, templateRoot, outputDir st return err } + // Print welcome message + welcome := config.schema.WelcomeMessage + if welcome != "" { + welcome, err = 
r.executeTemplate(welcome) + if err != nil { + return err + } + cmdio.LogString(ctx, welcome) + } + // Prompt user for any missing config values. Assign default values if // terminal is not TTY err = config.promptOrAssignDefaultValues(r) diff --git a/libs/template/renderer.go b/libs/template/renderer.go index 6415cd84a..827f30133 100644 --- a/libs/template/renderer.go +++ b/libs/template/renderer.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "io/fs" "os" "path" "path/filepath" @@ -313,7 +314,7 @@ func (r *renderer) persistToDisk() error { if err == nil { return fmt.Errorf("failed to initialize template, one or more files already exist: %s", path) } - if err != nil && !os.IsNotExist(err) { + if err != nil && !errors.Is(err, fs.ErrNotExist) { return fmt.Errorf("error while verifying file %s does not already exist: %w", path, err) } } diff --git a/libs/template/renderer_test.go b/libs/template/renderer_test.go index 8d0c21010..a8678a525 100644 --- a/libs/template/renderer_test.go +++ b/libs/template/renderer_test.go @@ -14,9 +14,9 @@ import ( "github.com/databricks/cli/bundle" bundleConfig "github.com/databricks/cli/bundle/config" - "github.com/databricks/cli/bundle/config/mutator" "github.com/databricks/cli/bundle/phases" "github.com/databricks/cli/cmd/root" + "github.com/databricks/cli/libs/diag" "github.com/databricks/cli/libs/tags" "github.com/databricks/databricks-sdk-go" workspaceConfig "github.com/databricks/databricks-sdk-go/config" @@ -37,10 +37,10 @@ func assertFilePermissions(t *testing.T, path string, perm fs.FileMode) { assert.Equal(t, perm, info.Mode().Perm()) } -func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { +func assertBuiltinTemplateValid(t *testing.T, template string, settings map[string]any, target string, isServicePrincipal bool, build bool, tempDir string) { ctx := context.Background() - templatePath, err := prepareBuiltinTemplates("default-python", tempDir) + templatePath, err := prepareBuiltinTemplates(template, tempDir) require.NoError(t, err) libraryPath := filepath.Join(templatePath, "library") @@ -50,6 +50,9 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st // Prepare helpers cachedUser = &iam.User{UserName: "user@domain.com"} + if isServicePrincipal { + cachedUser.UserName = "1d410060-a513-496f-a197-23cc82e5f46d" + } cachedIsServicePrincipal = &isServicePrincipal ctx = root.SetWorkspaceClient(ctx, w) helpers := loadHelpers(ctx) @@ -62,27 +65,33 @@ func assertBuiltinTemplateValid(t *testing.T, settings map[string]any, target st require.NoError(t, err) err = renderer.persistToDisk() require.NoError(t, err) + b, err := bundle.Load(ctx, filepath.Join(tempDir, "template", "my_project")) require.NoError(t, err) + diags := bundle.Apply(ctx, b, phases.LoadNamedTarget(target)) + require.NoError(t, diags.Error()) // Apply initialize / validation mutators - b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + bundle.ApplyFunc(ctx, b, func(ctx context.Context, b *bundle.Bundle) diag.Diagnostics { + b.Config.Workspace.CurrentUser = &bundleConfig.User{User: cachedUser} + b.Config.Bundle.Terraform = &bundleConfig.Terraform{ + ExecPath: "sh", + } + return nil + }) + b.Tagging = tags.ForCloud(w.Config) b.WorkspaceClient() - b.Config.Bundle.Terraform = &bundleConfig.Terraform{ - ExecPath: "sh", - } - err = bundle.Apply(ctx, b, bundle.Seq( - bundle.Seq(mutator.DefaultMutators()...), - mutator.SelectTarget(target), + + diags = 
bundle.Apply(ctx, b, bundle.Seq( phases.Initialize(), )) - require.NoError(t, err) + require.NoError(t, diags.Error()) // Apply build mutator if build { - err = bundle.Apply(ctx, b, phases.Build()) - require.NoError(t, err) + diags = bundle.Apply(ctx, b, phases.Build()) + require.NoError(t, diags.Error()) } } @@ -98,10 +107,12 @@ func TestPrepareBuiltInTemplatesWithRelativePaths(t *testing.T) { assert.Equal(t, "./default-python", dir) } -func TestBuiltinTemplateValid(t *testing.T) { +func TestBuiltinPythonTemplateValid(t *testing.T) { // Test option combinations options := []string{"yes", "no"} isServicePrincipal := false + catalog := "hive_metastore" + cachedCatalog = &catalog build := false for _, includeNotebook := range options { for _, includeDlt := range options { @@ -114,7 +125,7 @@ func TestBuiltinTemplateValid(t *testing.T) { "include_python": includePython, } tempDir := t.TempDir() - assertBuiltinTemplateValid(t, config, "dev", isServicePrincipal, build, tempDir) + assertBuiltinTemplateValid(t, "default-python", config, "dev", isServicePrincipal, build, tempDir) } } } @@ -136,10 +147,46 @@ func TestBuiltinTemplateValid(t *testing.T) { require.NoError(t, err) defer os.RemoveAll(tempDir) - assertBuiltinTemplateValid(t, config, "prod", isServicePrincipal, build, tempDir) + assertBuiltinTemplateValid(t, "default-python", config, "prod", isServicePrincipal, build, tempDir) defer os.RemoveAll(tempDir) } +func TestBuiltinSQLTemplateValid(t *testing.T) { + for _, personal_schemas := range []string{"yes", "no"} { + for _, target := range []string{"dev", "prod"} { + for _, isServicePrincipal := range []bool{true, false} { + config := map[string]any{ + "project_name": "my_project", + "http_path": "/sql/1.0/warehouses/123abc", + "default_catalog": "users", + "shared_schema": "lennart", + "personal_schemas": personal_schemas, + } + build := false + assertBuiltinTemplateValid(t, "default-sql", config, target, isServicePrincipal, build, t.TempDir()) + } + } + } +} + +func TestBuiltinDbtTemplateValid(t *testing.T) { + for _, personal_schemas := range []string{"yes", "no"} { + for _, target := range []string{"dev", "prod"} { + for _, isServicePrincipal := range []bool{true, false} { + config := map[string]any{ + "project_name": "my_project", + "http_path": "/sql/1.0/warehouses/123", + "default_catalog": "hive_metastore", + "personal_schemas": personal_schemas, + "shared_schema": "lennart", + } + build := false + assertBuiltinTemplateValid(t, "dbt-sql", config, target, isServicePrincipal, build, t.TempDir()) + } + } + } +} + func TestRendererWithAssociatedTemplateInLibrary(t *testing.T) { tmpDir := t.TempDir() diff --git a/libs/template/templates/dbt-sql/README.md b/libs/template/templates/dbt-sql/README.md new file mode 100644 index 000000000..4ccacab10 --- /dev/null +++ b/libs/template/templates/dbt-sql/README.md @@ -0,0 +1,9 @@ +# dbt template + +This folder provides a template for using dbt-core with Databricks Asset Bundles. +It leverages dbt-core for local development and relies on Databricks Asset Bundles +for deployment (either manually or with CI/CD). In production, +dbt is executed using Databricks Workflows. + +* Learn more about the dbt and its standard project structure here: https://docs.getdbt.com/docs/build/projects. 
+* Learn more about Databricks Asset Bundles here: https://docs.databricks.com/en/dev-tools/bundles/index.html diff --git a/libs/template/templates/dbt-sql/databricks_template_schema.json b/libs/template/templates/dbt-sql/databricks_template_schema.json new file mode 100644 index 000000000..cccf145dc --- /dev/null +++ b/libs/template/templates/dbt-sql/databricks_template_schema.json @@ -0,0 +1,53 @@ +{ + "welcome_message": "\nWelcome to the dbt template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", + "properties": { + "project_name": { + "type": "string", + "pattern": "^[A-Za-z_][A-Za-z0-9-_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, dashes, and underscores.", + "default": "dbt_project", + "description": "\nPlease provide a unique name for this project.\nproject_name", + "order": 1 + }, + "http_path": { + "type": "string", + "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", + "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "order": 2 + }, + "default_catalog": { + "type": "string", + "default": "{{default_catalog}}", + "pattern": "^\\w*$", + "pattern_match_failure_message": "Invalid catalog name.", + "description": "\nPlease provide an initial catalog{{if eq (default_catalog) \"\"}} (leave blank when not using Unity Catalog){{end}}.\ndefault_catalog", + "order": 3 + }, + "personal_schemas": { + "type": "string", + "description": "\nWould you like to use a personal schema for each user working on this project? 
(e.g., 'catalog.{{short_name}}')\npersonal_schemas", + "enum": [ + "yes, use a schema based on the current user name during development", + "no, use a shared schema during development" + ], + "order": 4 + }, + "shared_schema": { + "skip_prompt_if": { + "properties": { + "personal_schemas": { + "const": "yes, use a schema based on the current user name during development" + } + } + }, + "type": "string", + "default": "default", + "pattern": "^\\w+$", + "pattern_match_failure_message": "Invalid schema name.", + "description": "\nPlease provide an initial schema during development.\ndefault_schema", + "order": 5 + } + }, + "success_message": "\n📊 Your new project has been created in the '{{.project_name}}' directory!\nIf you already have dbt installed, just type 'cd {{.project_name}}; dbt init' to get started.\nRefer to the README.md file for a full \"getting started\" guide and production setup instructions.\n" +} diff --git a/libs/template/templates/dbt-sql/library/versions.tmpl b/libs/template/templates/dbt-sql/library/versions.tmpl new file mode 100644 index 000000000..f9a879d25 --- /dev/null +++ b/libs/template/templates/dbt-sql/library/versions.tmpl @@ -0,0 +1,7 @@ +{{define "latest_lts_dbr_version" -}} + 13.3.x-scala2.12 +{{- end}} + +{{define "latest_lts_db_connect_version_spec" -}} + >=13.3,<13.4 +{{- end}} diff --git a/libs/template/templates/dbt-sql/template/__preamble.tmpl b/libs/template/templates/dbt-sql/template/__preamble.tmpl new file mode 100644 index 000000000..b770b5ef9 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/__preamble.tmpl @@ -0,0 +1,9 @@ +# Preamble + +This file contains only template directives; it is skipped for the actual output. + +{{skip "__preamble"}} + +{{if eq .project_name "dbt"}} +{{fail "Project name 'dbt' is not supported"}} +{{end}} \ No newline at end of file diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi new file mode 100644 index 000000000..0edd5181b --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/__builtins__.pyi @@ -0,0 +1,3 @@ +# Typings for Pylance in Visual Studio Code +# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md +from databricks.sdk.runtime import * diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json new file mode 100644 index 000000000..28fe943fd --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/extensions.json @@ -0,0 +1,6 @@ +{ + "recommendations": [ + "redhat.vscode-yaml", + "innoverio.vscode-dbt-power-user", + ] +} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl new file mode 100644 index 000000000..562ba136f --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -0,0 +1,33 @@ +{ + "python.analysis.stubPath": ".vscode", + "databricks.python.envFile": "${workspaceFolder}/.env", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", + "python.testing.pytestArgs": [ + "."
+ ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.analysis.extraPaths": ["src"], + "files.exclude": { + "**/*.egg-info": true, + "**/__pycache__": true, + ".pytest_cache": true, + }, + "python.envFile": "${workspaceFolder}/.databricks/.databricks.env", + "python.defaultInterpreterPath": "${workspaceFolder}/.venv/bin/python", + "sqltools.connections": [ + { + "connectionMethod": "VS Code Extension (beta)", + "catalog": "hive_metastore", + "previewLimit": 50, + "driver": "Databricks", + "name": "databricks", + "path": "{{.http_path}}" + } + ], + "sqltools.autoConnectTo": "", + "[jinja-sql]": { + "editor.defaultFormatter": "innoverio.vscode-dbt-power-user" + } +} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl new file mode 100644 index 000000000..dbf8a8d85 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/README.md.tmpl @@ -0,0 +1,138 @@ +# {{.project_name}} + +The '{{.project_name}}' project was generated by using the dbt template for +Databricks Asset Bundles. It follows the standard dbt project structure +and has an additional `resources` directory to define Databricks resources such as jobs +that run dbt models. + +* Learn more about dbt and its standard project structure here: https://docs.getdbt.com/docs/build/projects. +* Learn more about Databricks Asset Bundles here: https://docs.databricks.com/en/dev-tools/bundles/index.html + +The remainder of this file includes instructions for local development (using dbt) +and deployment to production (using Databricks Asset Bundles). + +## Development setup + +1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/databricks-cli.html + +2. Authenticate to your Databricks workspace, if you have not done so already: + ``` + $ databricks configure + ``` + +3. Install dbt + + To install dbt, you need a recent version of Python. For the instructions below, + we assume `python3` refers to the Python version you want to use. On some systems, + you may need to refer to a different Python version, e.g. `python` or `/usr/bin/python`. + + Run these instructions from the `{{.project_name}}` directory. We recommend making + use of a Python virtual environment and installing dbt as follows: + + ``` + $ python3 -m venv .venv + $ . .venv/bin/activate + $ pip install -r requirements-dev.txt + ``` + +4. Initialize your dbt profile + + Use `dbt init` to initialize your profile. + + ``` + $ dbt init + ``` + + Note that dbt authentication uses personal access tokens by default + (see https://docs.databricks.com/dev-tools/auth/pat.html). + You can use OAuth as an alternative, but this currently requires manual configuration. + See https://github.com/databricks/dbt-databricks/blob/main/docs/oauth.md + for general instructions, or https://community.databricks.com/t5/technical-blog/using-dbt-core-with-oauth-on-azure-databricks/ba-p/46605 + for advice on setting up OAuth for Azure Databricks. + + To set up additional profiles, such as a 'prod' profile, + see https://docs.getdbt.com/docs/core/connect-data-platform/connection-profiles. + +5. Activate dbt so it can be used from the terminal + + ``` + $ .
+ .venv/bin/activate + ``` + +## Local development with dbt + +Use `dbt` to [run this project locally using a SQL warehouse](https://docs.databricks.com/partners/prep/dbt.html): + +``` +$ dbt seed +$ dbt run +``` + +(Did you get an error that the dbt command could not be found? You may need +to try the last step from the development setup above to re-activate +your Python virtual environment!) + + +To just evaluate a single model defined in a file called orders.sql, use: + +``` +$ dbt run --model orders +``` + +Use `dbt test` to run tests generated from yml files such as `models/schema.yml` +and any SQL tests from `tests/`: + +``` +$ dbt test +``` + +## Production setup + +Your production dbt profiles are defined in dbt_profiles/profiles.yml. +These profiles define the default catalog, schema, and any other +target-specific settings. Read more about dbt profiles on Databricks at +https://docs.databricks.com/en/workflows/jobs/how-to/use-dbt-in-workflows.html#advanced-run-dbt-with-a-custom-profile. + +The target workspaces for staging and prod are defined in databricks.yml. +You can manually deploy based on these configurations (see below). +Or you can use CI/CD to automate deployment. See +https://docs.databricks.com/dev-tools/bundles/ci-cd.html for documentation +on CI/CD setup. + +## Manually deploying to Databricks with Databricks Asset Bundles + +Databricks Asset Bundles can be used to deploy to Databricks and to execute +dbt commands as a job using Databricks Workflows. See +https://docs.databricks.com/dev-tools/bundles/index.html to learn more. + +Use the Databricks CLI to deploy a development copy of this project to a workspace: + +``` +$ databricks bundle deploy --target dev +``` + +(Note that "dev" is the default target, so the `--target` parameter +is optional here.) + +This deploys everything that's defined for this project. +For example, the default template would deploy a job called +`[dev yourname] {{.project_name}}_job` to your workspace. +You can find that job by opening your workspace and clicking on **Workflows**. + +You can also deploy to your production target directly from the command-line. +The warehouse, catalog, and schema for that target are configured in databricks.yml. +When deploying to this target, note that the default job at resources/{{.project_name}}_job.yml +has a schedule set that runs every day. The schedule is paused when deploying in development mode +(see https://docs.databricks.com/dev-tools/bundles/deployment-modes.html). + +To deploy a production copy, type: + +``` +$ databricks bundle deploy --target prod +``` + +## IDE support + +Optionally, install developer tools such as the Databricks extension for Visual Studio Code from +https://docs.databricks.com/dev-tools/vscode-ext.html. Third-party extensions +related to dbt may further enhance your dbt development experience! diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl new file mode 100644 index 000000000..fdda03c0d --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/databricks.yml.tmpl @@ -0,0 +1,32 @@ +# This file defines the structure of this project and how it is deployed +# to production using Databricks Asset Bundles. +# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. +bundle: + name: {{.project_name}} + +include: + - resources/*.yml + +# Deployment targets. +# The default schema, catalog, etc.
for dbt are defined in dbt_profiles/profiles.yml +targets: + dev: + default: true + # We use 'mode: development' to indicate this is a personal development copy. + # Any job schedules and triggers are paused by default. + mode: development + workspace: + host: {{workspace_host}} + + prod: + mode: production + workspace: + host: {{workspace_host}} + # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy. + root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target} + {{- if not is_service_principal}} + run_as: + # This runs as {{user_name}} in production. We could also use a service principal here + # using service_principal_name (see the Databricks documentation). + user_name: {{user_name}} + {{- end}} diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl new file mode 100644 index 000000000..e96931e2d --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_profiles/profiles.yml.tmpl @@ -0,0 +1,45 @@ +{{- $catalog := .default_catalog}} +{{- if eq .default_catalog ""}} +{{- $catalog = "\"\" # workspace default"}} +{{- end}} +# This file defines dbt profiles for deployed dbt jobs. +{{.project_name}}: + target: dev # default target + outputs: + + # Doing local development with the dbt CLI? + # Then you should create your own profile in your .dbt/profiles.yml using 'dbt init' + # (See README.md) + + # The default target when deployed with the Databricks CLI + # N.B. when you use dbt from the command line, it uses the profile from .dbt/profiles.yml + dev: + type: databricks + method: http + catalog: {{$catalog}} +{{- if (regexp "^yes").MatchString .personal_schemas}} + schema: "{{"{{"}} var('dev_schema') {{"}}"}}" +{{- else}} + schema: "{{.shared_schema}}" +{{- end}} + + http_path: {{.http_path}} + + # The workspace host / token are provided by Databricks + # see databricks.yml for the workspace host used for 'dev' + host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" + token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" + + # The production target when deployed with the Databricks CLI + prod: + type: databricks + method: http + catalog: {{$catalog}} + schema: {{.shared_schema}} + + http_path: {{.http_path}} + + # The workspace host / token are provided by Databricks + # see databricks.yml for the workspace host used for 'prod' + host: "{{"{{"}} env_var('DBT_HOST') {{"}}"}}" + token: "{{"{{"}} env_var('DBT_ACCESS_TOKEN') {{"}}"}}" diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl new file mode 100644 index 000000000..11fbf051e --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/dbt_project.yml.tmpl @@ -0,0 +1,32 @@ +name: '{{.project_name}}' +version: '1.0.0' +config-version: 2 + +# This setting configures which "profile" dbt uses for this project. +profile: '{{.project_name}}' + +# These configurations specify where dbt should look for different types of files. +# For Databricks asset bundles, we put everything in src, as you may have +# non-dbt resources in your project. 
+model-paths: ["src/models"] +analysis-paths: ["src/analyses"] +test-paths: ["src/tests"] +seed-paths: ["src/seeds"] +macro-paths: ["src/macros"] +snapshot-paths: ["src/snapshots"] + +clean-targets: # directories to be removed by `dbt clean` + - "target" + - "dbt_packages" + +# Configuring models +# Full documentation: https://docs.getdbt.com/docs/configuring-models + +# In this example config, we tell dbt to build all models in the example/ +# directory as views by default. These settings can be overridden in the +# individual model files using the `{{"{{"}} config(...) {{"}}"}}` macro. +models: + {{.project_name}}: + # Config indicated by + and applies to all files under models/example/ + example: + +materialized: view diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl new file mode 100644 index 000000000..1bab573f2 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/profile_template.yml.tmpl @@ -0,0 +1,37 @@ +# This file defines prompts with defaults for dbt initialization. +# It is used when the `dbt init` command is invoked. +# +fixed: + type: databricks +prompts: + host: + default: {{(regexp "^https?://").ReplaceAllString workspace_host ""}} + token: + hint: 'personal access token to use, dapiXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX' + hide_input: true + http_path: + hint: 'HTTP path of SQL warehouse to use' + default: {{.http_path}} + {{- if eq .default_catalog ""}} + _choose_unity_catalog: + 'use the default workspace catalog (or do not use Unity Catalog)': + _fixed_catalog: null + 'specify a default catalog': + catalog: + hint: 'initial catalog' + {{- else}} + catalog: + hint: 'initial catalog' + default: {{.default_catalog}} + {{- end}} + schema: + {{- if (regexp "^yes").MatchString .personal_schemas}} + hint: 'personal schema where dbt will build objects during development, example: {{short_name}}' + {{- else}} + hint: 'default schema where dbt will build objects' + default: {{.shared_schema}} + {{- end}} + threads: + hint: 'threads to use during development, 1 or more' + type: 'int' + default: 4 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt new file mode 100644 index 000000000..e6b861203 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/requirements-dev.txt @@ -0,0 +1,3 @@ +## requirements-dev.txt: dependencies for local development. + +dbt-databricks>=1.8.0,<2.0.0 diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl new file mode 100644 index 000000000..bad12c755 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/resources/{{.project_name}}_job.yml.tmpl @@ -0,0 +1,49 @@ +resources: + jobs: + {{.project_name}}_job: + name: {{.project_name}}_job + + schedule: + # Run every day at 9:27 AM + quartz_cron_expression: 21 27 9 * * ? + timezone_id: UTC + + email_notifications: + on_failure: + - {{user_name}} + + + tasks: + - task_key: dbt + + dbt_task: + project_directory: ../ + # The default schema, catalog, etc.
are defined in ../dbt_profiles/profiles.yml + profiles_directory: dbt_profiles/ + commands: +{{- if (regexp "^yes").MatchString .personal_schemas}} + # The dbt commands to run (see also dbt_profiles/profiles.yml; dev_schema is used in the dev profile) + - 'dbt deps --target=${bundle.target}' + - 'dbt seed --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' + - 'dbt run --target=${bundle.target} --vars "{ dev_schema: ${workspace.current_user.short_name} }"' +{{- else}} + # The dbt commands to run (see also the dev/prod profiles in dbt_profiles/profiles.yml) + - 'dbt deps --target=${bundle.target}' + - 'dbt seed --target=${bundle.target}' + - 'dbt run --target=${bundle.target}' +{{- end}} + + libraries: + - pypi: + package: dbt-databricks>=1.8.0,<2.0.0 + + new_cluster: + spark_version: {{template "latest_lts_dbr_version"}} + node_type_id: {{smallest_node_type}} + data_security_mode: SINGLE_USER + num_workers: 0 + spark_conf: + spark.master: "local[*, 4]" + spark.databricks.cluster.profile: singleNode + custom_tags: + ResourceClass: SingleNode diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/analyses/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/analyses/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/macros/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/macros/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl new file mode 100644 index 000000000..a8b4c2f9a --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_daily.sql.tmpl @@ -0,0 +1,24 @@ +{{- if eq (default_catalog) ""}} +{{- /* This workspace might not have Unity Catalog, */}} +{{- /* so let's not show both materialized views and streaming tables. */}} +{{- /* They're not supported without Unity Catalog! */}} +-- This model file defines a table called 'orders_daily' +{{"{{"}} config(materialized = 'table') {{"}}"}} +{{- else}} +-- This model file defines a materialized view called 'orders_daily' +-- +-- Read more about materialized views at https://docs.getdbt.com/reference/resource-configs/databricks-configs#materialized-views-and-streaming-tables +-- Current limitation: a "full refresh" is needed in case the definition below is changed; see https://github.com/databricks/dbt-databricks/issues/561.
+{{"{{"}} config(materialized = 'materialized_view') {{"}}"}} +{{- end}} + +select order_date, count(*) AS number_of_orders + +from {{"{{"}} ref('orders_raw') {{"}}"}} + +-- During development, only process a smaller range of data +{% if target.name != 'prod' %} +where order_date >= '2019-08-01' and order_date < '2019-09-01' +{% endif %} + +group by order_date diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl new file mode 100644 index 000000000..17e6a5bf3 --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/orders_raw.sql.tmpl @@ -0,0 +1,16 @@ +-- This model file defines a streaming table called 'orders_raw' +-- +-- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ +-- Read more about streaming tables at https://docs.getdbt.com/reference/resource-configs/databricks-configs#materialized-views-and-streaming-tables +-- Current limitation: a "full refresh" is needed in case the definition below is changed; see https://github.com/databricks/dbt-databricks/issues/561. +{{"{{"}} config(materialized = 'streaming_table') {{"}}"}} + +select + customer_name, + date(timestamp(from_unixtime(try_cast(order_datetime as bigint)))) as order_date, + order_number +from stream read_files( + "/databricks-datasets/retail-org/sales_orders/", + format => "json", + header => true +) diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml new file mode 100644 index 000000000..c64f1bfce --- /dev/null +++ b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/models/example/schema.yml @@ -0,0 +1,21 @@ + +version: 2 + +models: + - name: orders_raw + description: "Raw ingested orders" + columns: + - name: customer_name + description: "The name of a customer" + data_tests: + - unique + - not_null + + - name: orders_daily + description: "Number of orders by day" + columns: + - name: order_date + description: "The date on which orders took place" + data_tests: + - unique + - not_null diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/seeds/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/seeds/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/snapshots/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/snapshots/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/dbt-sql/template/{{.project_name}}/src/tests/.gitkeep b/libs/template/templates/dbt-sql/template/{{.project_name}}/src/tests/.gitkeep new file mode 100644 index 000000000..e69de29bb diff --git a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl index 476c1cd6c..5adade0b3 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/README.md.tmpl @@ -6,7 +6,7 @@ The '{{.project_name}}' project was generated by using the default-python templa 1. Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/databricks-cli.html -2. 
Authenticate to your Databricks workspace: +2. Authenticate to your Databricks workspace, if you have not done so already: ``` $ databricks configure ``` @@ -28,6 +28,11 @@ The '{{.project_name}}' project was generated by using the default-python templa $ databricks bundle deploy --target prod ``` + Note that the default job from the template has a schedule that runs every day + (defined in resources/{{.project_name}}_job.yml). The schedule + is paused when deploying in development mode (see + https://docs.databricks.com/dev-tools/bundles/deployment-modes.html). + 5. To run a job or pipeline, use the "run" command: ``` $ databricks bundle run diff --git a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl index ea432f8db..e3572326b 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/databricks.yml.tmpl @@ -19,7 +19,7 @@ targets: host: {{workspace_host}} ## Optionally, there could be a 'staging' target here. - ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/index.html.) + ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.) # # staging: # workspace: diff --git a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl index 6da403219..93dd4c480 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/requirements-dev.txt.tmpl @@ -3,6 +3,9 @@ ## For defining dependencies used by jobs in Databricks Workflows, see ## https://docs.databricks.com/dev-tools/bundles/library-dependencies.html +## Add code completion support for DLT +databricks-dlt + ## pytest is the default package used for testing pytest diff --git a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl index 04bb261cd..42164dff0 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/scratch/exploration.ipynb.tmpl @@ -1,5 +1,15 @@ { "cells": [ + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, { "cell_type": "code", "execution_count": null, @@ -22,7 +32,7 @@ "sys.path.append('../src')\n", "from {{.project_name}} import main\n", "\n", - "main.get_taxis().show(10)" + "main.get_taxis(spark).show(10)" {{else}} "spark.range(10)" {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl index 4f50294f6..b152e9a30 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/dlt_pipeline.ipynb.tmpl @@ -63,7 +63,7 @@ {{- if (eq .include_python "yes") }} "@dlt.view\n", "def taxi_raw():\n", - " return main.get_taxis()\n", + " return 
main.get_taxis(spark)\n", {{else}} "\n", "@dlt.view\n", diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl index 0ab61db2c..a228f8d18 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/notebook.ipynb.tmpl @@ -17,6 +17,16 @@ "This default notebook is executed using Databricks Workflows as defined in resources/{{.project_name}}_job.yml." ] }, + { + "cell_type": "code", + "execution_count": 2, + "metadata": {}, + "outputs": [], + "source": [ + "%load_ext autoreload\n", + "%autoreload 2" + ] + }, { "cell_type": "code", "execution_count": 0, @@ -37,7 +47,7 @@ {{- if (eq .include_python "yes") }} "from {{.project_name}} import main\n", "\n", - "main.get_taxis().show(10)" + "main.get_taxis(spark).show(10)" {{else}} "spark.range(10)" {{end -}} diff --git a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl index 4fe5ac8f4..c514c6dc5 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/src/{{.project_name}}/main.py.tmpl @@ -1,16 +1,21 @@ -{{- /* -We use pyspark.sql rather than DatabricksSession.builder.getOrCreate() -for compatibility with older runtimes. With a new runtime, it's -equivalent to DatabricksSession.builder.getOrCreate(). -*/ -}} -from pyspark.sql import SparkSession +from pyspark.sql import SparkSession, DataFrame -def get_taxis(): - spark = SparkSession.builder.getOrCreate() +def get_taxis(spark: SparkSession) -> DataFrame: return spark.read.table("samples.nyctaxi.trips") + +# Create a new Databricks Connect session. If this fails, +# check that you have configured Databricks Connect correctly. +# See https://docs.databricks.com/dev-tools/databricks-connect.html. +def get_spark() -> SparkSession: + try: + from databricks.connect import DatabricksSession + return DatabricksSession.builder.getOrCreate() + except ImportError: + return SparkSession.builder.getOrCreate() + def main(): - get_taxis().show(5) + get_taxis(get_spark()).show(5) if __name__ == '__main__': main() diff --git a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl index a7a6afe0a..fea2f3f66 100644 --- a/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl +++ b/libs/template/templates/default-python/template/{{.project_name}}/tests/main_test.py.tmpl @@ -1,21 +1,6 @@ -from databricks.connect import DatabricksSession -from pyspark.sql import SparkSession -from {{.project_name}} import main +from {{.project_name}}.main import get_taxis, get_spark -# Create a new Databricks Connect session. If this fails, -# check that you have configured Databricks Connect correctly. -# See https://docs.databricks.com/dev-tools/databricks-connect.html. -{{/* - The below works around a problematic error message from Databricks Connect. - The standard SparkSession is supported in all configurations (workspace, IDE, - all runtime versions, CLI). 
But on the CLI it currently gives a confusing - error message if SPARK_REMOTE is not set. We can't directly use - DatabricksSession.builder in main.py, so we're re-assigning it here so - everything works out of the box, even for CLI users who don't set SPARK_REMOTE. -*/}} -SparkSession.builder = DatabricksSession.builder -SparkSession.builder.getOrCreate() def test_main(): - taxis = main.get_taxis() + taxis = get_taxis(get_spark()) assert taxis.count() > 5 diff --git a/libs/template/templates/default-sql/README.md b/libs/template/templates/default-sql/README.md new file mode 100644 index 000000000..6b7140f07 --- /dev/null +++ b/libs/template/templates/default-sql/README.md @@ -0,0 +1,3 @@ +# sql template + +This folder provides a template for using SQL with Databricks Asset Bundles. diff --git a/libs/template/templates/default-sql/databricks_template_schema.json b/libs/template/templates/default-sql/databricks_template_schema.json new file mode 100644 index 000000000..329f91962 --- /dev/null +++ b/libs/template/templates/default-sql/databricks_template_schema.json @@ -0,0 +1,53 @@ +{ + "welcome_message": "\nWelcome to the default SQL template for Databricks Asset Bundles!\n\nA workspace was selected based on your current profile. For information about how to change this, see https://docs.databricks.com/dev-tools/cli/profiles.html.\nworkspace_host: {{workspace_host}}", + "properties": { + "project_name": { + "type": "string", + "default": "sql_project", + "description": "\nPlease provide a unique name for this project.\nproject_name", + "order": 1, + "pattern": "^[A-Za-z_][A-Za-z0-9-_]+$", + "pattern_match_failure_message": "Name must consist of letters, numbers, dashes, and underscores." + }, + "http_path": { + "type": "string", + "pattern": "^/sql/.\\../warehouses/[a-z0-9]+$", + "pattern_match_failure_message": "Path must be of the form /sql/1.0/warehouses/", + "description": "\nPlease provide the HTTP Path of the SQL warehouse you would like to use with dbt during development.\nYou can find this path by clicking on \"Connection details\" for your SQL warehouse.\nhttp_path [example: /sql/1.0/warehouses/abcdef1234567890]", + "order": 2 + }, + "default_catalog": { + "type": "string", + "default": "{{if eq (default_catalog) \"\"}}hive_metastore{{else}}{{default_catalog}}{{end}}", + "pattern": "^\\w*$", + "pattern_match_failure_message": "Invalid catalog name.", + "description": "\nPlease provide an initial catalog{{if eq (default_catalog) \"\"}} or metastore{{end}}.\ndefault_catalog", + "order": 3 + }, + "personal_schemas": { + "type": "string", + "description": "\nWould you like to use a personal schema for each user working on this project? 
(e.g., 'catalog.{{short_name}}')\npersonal_schemas", + "enum": [ + "yes, automatically use a schema based on the current user name during development", + "no, use a single schema for all users during development" + ], + "order": 4 + }, + "shared_schema": { + "skip_prompt_if": { + "properties": { + "personal_schemas": { + "const": "yes, automatically use a schema based on the current user name during development" + } + } + }, + "type": "string", + "default": "default", + "pattern": "^\\w+$", + "pattern_match_failure_message": "Invalid schema name.", + "description": "\nPlease provide an initial schema during development.\ndefault_schema", + "order": 5 + } + }, + "success_message": "\n✨ Your new project has been created in the '{{.project_name}}' directory!\n\nPlease refer to the README.md file for \"getting started\" instructions.\nSee also the documentation at https://docs.databricks.com/dev-tools/bundles/index.html." +} diff --git a/libs/template/templates/default-sql/library/versions.tmpl b/libs/template/templates/default-sql/library/versions.tmpl new file mode 100644 index 000000000..f9a879d25 --- /dev/null +++ b/libs/template/templates/default-sql/library/versions.tmpl @@ -0,0 +1,7 @@ +{{define "latest_lts_dbr_version" -}} + 13.3.x-scala2.12 +{{- end}} + +{{define "latest_lts_db_connect_version_spec" -}} + >=13.3,<13.4 +{{- end}} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json new file mode 100644 index 000000000..8e1023465 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/extensions.json @@ -0,0 +1,7 @@ +{ + "recommendations": [ + "databricks.databricks", + "redhat.vscode-yaml", + "databricks.sqltools-databricks-driver", + ] +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl new file mode 100644 index 000000000..c63af24b4 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/.vscode/settings.json.tmpl @@ -0,0 +1,28 @@ +{ + "python.analysis.stubPath": ".vscode", + "databricks.python.envFile": "${workspaceFolder}/.env", + "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])", + "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------", + "python.testing.pytestArgs": [ + "." + ], + "python.testing.unittestEnabled": false, + "python.testing.pytestEnabled": true, + "python.analysis.extraPaths": ["src"], + "files.exclude": { + "**/*.egg-info": true, + "**/__pycache__": true, + ".pytest_cache": true, + }, + "sqltools.connections": [ + { + "connectionMethod": "VS Code Extension (beta)", + "catalog": "{{.default_catalog}}", + "previewLimit": 50, + "driver": "Databricks", + "name": "databricks", + "path": "{{.http_path}}" + } + ], + "sqltools.autoConnectTo": "", +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl new file mode 100644 index 000000000..e5c44320d --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/README.md.tmpl @@ -0,0 +1,41 @@ +# {{.project_name}} + +The '{{.project_name}}' project was generated by using the default-sql template. + +## Getting started + +1. 
Install the Databricks CLI from https://docs.databricks.com/dev-tools/cli/install.html + +2. Authenticate to your Databricks workspace (if you have not done so already): + ``` + $ databricks configure + ``` + +3. To deploy a development copy of this project, type: + ``` + $ databricks bundle deploy --target dev + ``` + (Note that "dev" is the default target, so the `--target` parameter + is optional here.) + + This deploys everything that's defined for this project. + For example, the default template would deploy a job called + `[dev yourname] {{.project_name}}_job` to your workspace. + You can find that job by opening your workspace and clicking on **Workflows**. + +4. Similarly, to deploy a production copy, type: + ``` + $ databricks bundle deploy --target prod + ``` + +5. To run a job, use the "run" command: + ``` + $ databricks bundle run + ``` + +6. Optionally, install developer tools such as the Databricks extension for Visual Studio Code from + https://docs.databricks.com/dev-tools/vscode-ext.html. + +7. For documentation on the Databricks Asset Bundles format used + for this project, and for CI/CD configuration, see + https://docs.databricks.com/dev-tools/bundles/index.html. diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl new file mode 100644 index 000000000..a47fb7c19 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/databricks.yml.tmpl @@ -0,0 +1,71 @@ +# This is a Databricks asset bundle definition for {{.project_name}}. +# See https://docs.databricks.com/dev-tools/bundles/index.html for documentation. +bundle: + name: {{.project_name}} + +include: + - resources/*.yml + +# Variable declarations. These variables are assigned in the dev/prod targets below. +variables: + warehouse_id: + description: The warehouse to use + catalog: + description: The catalog to use + schema: + description: The schema to use + +{{- $dev_schema := .shared_schema }} +{{- $prod_schema := .shared_schema }} +{{- if (regexp "^yes").MatchString .personal_schemas}} +{{- $dev_schema = "${workspace.current_user.short_name}"}} +{{- $prod_schema = "default"}} +{{- end}} + +# Deployment targets. +targets: + # The 'dev' target, for development purposes. This target is the default. + dev: + # We use 'mode: development' to indicate this is a personal development copy. + # Any job schedules and triggers are paused by default. + mode: development + default: true + workspace: + host: {{workspace_host}} + variables: + warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}} + catalog: {{.default_catalog}} + schema: {{$dev_schema}} + + ## Optionally, there could be a 'staging' target here. + ## (See Databricks docs on CI/CD at https://docs.databricks.com/dev-tools/bundles/ci-cd.html.) + # + # staging: + # workspace: + # host: {{workspace_host}} + + # The 'prod' target, used for production deployment. + prod: + # We use 'mode: production' to indicate this is a production deployment. + # Doing so enables strict verification of the settings below. + mode: production + workspace: + host: {{workspace_host}} + # We always use /Users/{{user_name}} for all resources to make sure we only have a single copy. + {{- /* + Internal note 2023-12: CLI versions v0.211.0 and before would show an error when using `mode: production` + with a path that doesn't say "/Shared".
For now, we'll include an extra comment in the template + to explain that customers should update if they see this. + */}} + # If this path results in an error, please make sure you have a recent version of the CLI installed. + root_path: /Users/{{user_name}}/.bundle/${bundle.name}/${bundle.target} + variables: + warehouse_id: {{index ((regexp "[^/]+$").FindStringSubmatch .http_path) 0}} + catalog: {{.default_catalog}} + schema: {{$prod_schema}} + {{- if not is_service_principal}} + run_as: + # This runs as {{user_name}} in production. We could also use a service principal here + # using service_principal_name (see https://docs.databricks.com/en/dev-tools/bundles/permissions.html). + user_name: {{user_name}} + {{end -}} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl new file mode 100644 index 000000000..31d2d21a9 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/resources/{{.project_name}}_sql_job.yml.tmpl @@ -0,0 +1,43 @@ +# A job running SQL queries on a SQL warehouse +resources: + jobs: + {{.project_name}}_sql_job: + name: {{.project_name}}_sql_job + + schedule: + # Run every day at 7:17 AM + quartz_cron_expression: '44 17 7 * * ?' + timezone_id: Europe/Amsterdam + + {{- if not is_service_principal}} + + email_notifications: + on_failure: + - {{user_name}} + + {{else}} + + {{end -}} + + parameters: + - name: catalog + default: ${var.catalog} + - name: schema + default: ${var.schema} + - name: bundle_target + default: ${bundle.target} + + tasks: + - task_key: orders_raw + sql_task: + warehouse_id: ${var.warehouse_id} + file: + path: ../src/orders_raw.sql + + - task_key: orders_daily + depends_on: + - task_key: orders_raw + sql_task: + warehouse_id: ${var.warehouse_id} + file: + path: ../src/orders_daily.sql diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md new file mode 100644 index 000000000..5350d09cf --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/README.md @@ -0,0 +1,4 @@ +# scratch + +This folder is reserved for personal, exploratory notebooks and SQL files. +By default these are not committed to Git, as 'scratch' is listed in .gitignore. 
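In the default-sql `databricks.yml.tmpl` above, the `warehouse_id` variable is derived from the `http_path` answer with the template helper `(regexp "[^/]+$").FindStringSubmatch`. The following is a minimal Go sketch of the same extraction, not part of this change; the example path value is hypothetical:

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Hypothetical http_path value, in the shape the template schema prompts for.
	httpPath := "/sql/1.0/warehouses/abcdef1234567890"

	// Same pattern the template uses: the trailing path segment is the warehouse ID.
	warehouseID := regexp.MustCompile(`[^/]+$`).FindStringSubmatch(httpPath)[0]

	fmt.Println(warehouseID) // prints "abcdef1234567890"
}
```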
diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl new file mode 100644 index 000000000..becee5fba --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/scratch/exploration.ipynb.tmpl @@ -0,0 +1,35 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": 0, + "metadata": { + "application/vnd.databricks.v1+cell": { + "cellMetadata": {}, + "inputWidgets": {}, + "nuid": "dc8c630c-1ea0-42e4-873f-e4dec4d3d416", + "showTitle": false, + "title": "" + } + }, + "outputs": [], + "source": [ + "%sql\n", + "SELECT * FROM json.`/databricks-datasets/nyctaxi/sample/json/`" + ] + } + ], + "metadata": { + "application/vnd.databricks.v1+notebook": { + "dashboards": [], + "language": "python", + "notebookMetadata": { + "pythonIndentUnit": 2 + }, + "notebookName": "exploration", + "widgets": {} + } + }, + "nbformat": 4, + "nbformat_minor": 0 +} diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl new file mode 100644 index 000000000..8a9d12ea8 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_daily.sql.tmpl @@ -0,0 +1,22 @@ +-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) +{{- /* We can't use a materialized view here since they don't support 'create or refresh' yet.*/}} + +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); + +CREATE OR REPLACE VIEW + orders_daily +AS SELECT + order_date, count(*) AS number_of_orders +FROM + orders_raw + +WHERE if( + {{"{{"}}bundle_target{{"}}"}} = "prod", + true, + + -- During development, only process a smaller range of data + order_date >= '2019-08-01' AND order_date < '2019-09-01' +) + +GROUP BY order_date diff --git a/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl new file mode 100644 index 000000000..c73606ef1 --- /dev/null +++ b/libs/template/templates/default-sql/template/{{.project_name}}/src/orders_raw.sql.tmpl @@ -0,0 +1,19 @@ +-- This query is executed using Databricks Workflows (see resources/{{.project_name}}_sql_job.yml) +-- +-- The streaming table below ingests all JSON files in /databricks-datasets/retail-org/sales_orders/ +-- See also https://docs.databricks.com/sql/language-manual/sql-ref-syntax-ddl-create-streaming-table.html + +USE CATALOG {{"{{"}}catalog{{"}}"}}; +USE IDENTIFIER({{"{{"}}schema{{"}}"}}); + +CREATE OR REFRESH STREAMING TABLE + orders_raw +AS SELECT + customer_name, + DATE(TIMESTAMP(FROM_UNIXTIME(TRY_CAST(order_datetime AS BIGINT)))) AS order_date, + order_number +FROM STREAM READ_FILES( + "/databricks-datasets/retail-org/sales_orders/", + format => "json", + header => true +) diff --git a/libs/template/testdata/config-assign-from-file-unknown-property/config.json b/libs/template/testdata/config-assign-from-file-unknown-property/config.json index 518eaa6a2..69ed020cf 100644 --- a/libs/template/testdata/config-assign-from-file-unknown-property/config.json +++ b/libs/template/testdata/config-assign-from-file-unknown-property/config.json @@ -1,3 +1,4 @@ { - "unknown_prop": 123 + "unknown_prop": 123, + "string_val": "i am a known property" } diff --git 
a/libs/template/testdata/random-int/template/hello.tmpl b/libs/template/testdata/random-int/template/hello.tmpl new file mode 100644 index 000000000..46dc63fb6 --- /dev/null +++ b/libs/template/testdata/random-int/template/hello.tmpl @@ -0,0 +1 @@ +{{print (random_int 10)}} diff --git a/libs/textutil/case.go b/libs/textutil/case.go new file mode 100644 index 000000000..a8c780591 --- /dev/null +++ b/libs/textutil/case.go @@ -0,0 +1,14 @@ +package textutil + +import "unicode" + +func CamelToSnakeCase(name string) string { + var out []rune = make([]rune, 0, len(name)*2) + for i, r := range name { + if i > 0 && unicode.IsUpper(r) { + out = append(out, '_') + } + out = append(out, unicode.ToLower(r)) + } + return string(out) +} diff --git a/libs/textutil/case_test.go b/libs/textutil/case_test.go new file mode 100644 index 000000000..77b3e0679 --- /dev/null +++ b/libs/textutil/case_test.go @@ -0,0 +1,40 @@ +package textutil + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCamelToSnakeCase(t *testing.T) { + cases := []struct { + input string + expected string + }{ + { + input: "test", + expected: "test", + }, + { + input: "testTest", + expected: "test_test", + }, + { + input: "testTestTest", + expected: "test_test_test", + }, + { + input: "TestTest", + expected: "test_test", + }, + { + input: "TestTestTest", + expected: "test_test_test", + }, + } + + for _, c := range cases { + output := CamelToSnakeCase(c.input) + assert.Equal(t, c.expected, output) + } +} diff --git a/libs/vfs/filer.go b/libs/vfs/filer.go new file mode 100644 index 000000000..54f672e06 --- /dev/null +++ b/libs/vfs/filer.go @@ -0,0 +1,66 @@ +package vfs + +import ( + "context" + "io/fs" + "path/filepath" + + "github.com/databricks/cli/libs/filer" +) + +type filerPath struct { + ctx context.Context + path string + fs FS + + construct func(path string) (filer.Filer, error) +} + +func NewFilerPath(ctx context.Context, path string, construct func(path string) (filer.Filer, error)) (Path, error) { + f, err := construct(path) + if err != nil { + return nil, err + } + + return &filerPath{ + ctx: ctx, + path: path, + fs: filer.NewFS(ctx, f).(FS), + + construct: construct, + }, nil +} + +func (f filerPath) Open(name string) (fs.File, error) { + return f.fs.Open(name) +} + +func (f filerPath) Stat(name string) (fs.FileInfo, error) { + return f.fs.Stat(name) +} + +func (f filerPath) ReadDir(name string) ([]fs.DirEntry, error) { + return f.fs.ReadDir(name) +} + +func (f filerPath) ReadFile(name string) ([]byte, error) { + return f.fs.ReadFile(name) +} + +func (f filerPath) Parent() Path { + if f.path == "/" { + return nil + } + + dir := filepath.Dir(f.path) + nf, err := NewFilerPath(f.ctx, dir, f.construct) + if err != nil { + panic(err) + } + + return nf +} + +func (f filerPath) Native() string { + return f.path +} diff --git a/libs/vfs/filer_test.go b/libs/vfs/filer_test.go new file mode 100644 index 000000000..ee1397521 --- /dev/null +++ b/libs/vfs/filer_test.go @@ -0,0 +1,79 @@ +package vfs + +import ( + "context" + "errors" + "io/fs" + "os" + "path/filepath" + "strings" + "testing" + + "github.com/databricks/cli/libs/filer" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFilerPath(t *testing.T) { + ctx := context.Background() + wd, err := os.Getwd() + require.NoError(t, err) + + // Create a new filer-backed path. + p, err := NewFilerPath(ctx, filepath.FromSlash(wd), filer.NewLocalClient) + require.NoError(t, err) + + // Open self. 
+ f, err := p.Open("filer_test.go") + require.NoError(t, err) + defer f.Close() + + // Run stat on self. + s, err := f.Stat() + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Read some bytes. + buf := make([]byte, 1024) + _, err = f.Read(buf) + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // Open non-existent file. + _, err = p.Open("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Stat self. + s, err = p.Stat("filer_test.go") + require.NoError(t, err) + assert.Equal(t, "filer_test.go", s.Name()) + assert.GreaterOrEqual(t, int(s.Size()), 128) + + // Stat non-existent file. + _, err = p.Stat("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadDir self. + entries, err := p.ReadDir(".") + require.NoError(t, err) + assert.GreaterOrEqual(t, len(entries), 1) + + // ReadDir non-existent directory. + _, err = p.ReadDir("doesntexist") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // ReadFile self. + buf, err = p.ReadFile("filer_test.go") + require.NoError(t, err) + assert.True(t, strings.HasPrefix(string(buf), "package vfs")) + + // ReadFile non-existent file. + _, err = p.ReadFile("doesntexist_test.go") + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Parent self. + pp := p.Parent() + require.NotNil(t, pp) + assert.Equal(t, filepath.Join(pp.Native(), "vfs"), p.Native()) +} diff --git a/libs/vfs/leaf.go b/libs/vfs/leaf.go new file mode 100644 index 000000000..8c11f9039 --- /dev/null +++ b/libs/vfs/leaf.go @@ -0,0 +1,29 @@ +package vfs + +import ( + "errors" + "io/fs" +) + +// FindLeafInTree returns the first path that holds `name`, +// traversing up to the root of the filesystem, starting at `p`. +func FindLeafInTree(p Path, name string) (Path, error) { + for p != nil { + _, err := fs.Stat(p, name) + + // No error means we found the leaf in p. + if err == nil { + return p, nil + } + + // ErrNotExist means we continue traversal up the tree. + if errors.Is(err, fs.ErrNotExist) { + p = p.Parent() + continue + } + + return nil, err + } + + return nil, fs.ErrNotExist +} diff --git a/libs/vfs/leaf_test.go b/libs/vfs/leaf_test.go new file mode 100644 index 000000000..da9412ec0 --- /dev/null +++ b/libs/vfs/leaf_test.go @@ -0,0 +1,38 @@ +package vfs + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestFindLeafInTree(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + root := filepath.Join(wd, "..", "..") + + // Find from working directory should work. + { + out, err := FindLeafInTree(MustNew(wd), ".git") + assert.NoError(t, err) + assert.Equal(t, root, out.Native()) + } + + // Find from project root itself should work. + { + out, err := FindLeafInTree(MustNew(root), ".git") + assert.NoError(t, err) + assert.Equal(t, root, out.Native()) + } + + // Find for something that doesn't exist should work. 
+ { + out, err := FindLeafInTree(MustNew(root), "this-leaf-doesnt-exist-anywhere") + assert.ErrorIs(t, err, os.ErrNotExist) + assert.Equal(t, nil, out) + } +} diff --git a/libs/vfs/os.go b/libs/vfs/os.go new file mode 100644 index 000000000..26447d830 --- /dev/null +++ b/libs/vfs/os.go @@ -0,0 +1,82 @@ +package vfs + +import ( + "io/fs" + "os" + "path/filepath" +) + +type osPath struct { + path string + + openFn func(name string) (fs.File, error) + statFn func(name string) (fs.FileInfo, error) + readDirFn func(name string) ([]fs.DirEntry, error) + readFileFn func(name string) ([]byte, error) +} + +func New(name string) (Path, error) { + abs, err := filepath.Abs(name) + if err != nil { + return nil, err + } + + return newOsPath(abs), nil +} + +func MustNew(name string) Path { + p, err := New(name) + if err != nil { + panic(err) + } + + return p +} + +func newOsPath(name string) Path { + if !filepath.IsAbs(name) { + panic("vfs: abs path must be absolute") + } + + // [os.DirFS] implements all required interfaces. + // We used type assertion below to get the underlying types. + dirfs := os.DirFS(name) + + return &osPath{ + path: name, + + openFn: dirfs.Open, + statFn: dirfs.(fs.StatFS).Stat, + readDirFn: dirfs.(fs.ReadDirFS).ReadDir, + readFileFn: dirfs.(fs.ReadFileFS).ReadFile, + } +} + +func (o osPath) Open(name string) (fs.File, error) { + return o.openFn(name) +} + +func (o osPath) Stat(name string) (fs.FileInfo, error) { + return o.statFn(name) +} + +func (o osPath) ReadDir(name string) ([]fs.DirEntry, error) { + return o.readDirFn(name) +} + +func (o osPath) ReadFile(name string) ([]byte, error) { + return o.readFileFn(name) +} + +func (o osPath) Parent() Path { + dir := filepath.Dir(o.path) + if dir == o.path { + return nil + } + + return newOsPath(dir) +} + +func (o osPath) Native() string { + return o.path +} diff --git a/libs/vfs/os_test.go b/libs/vfs/os_test.go new file mode 100644 index 000000000..6199bdc71 --- /dev/null +++ b/libs/vfs/os_test.go @@ -0,0 +1,54 @@ +package vfs + +import ( + "os" + "path/filepath" + "runtime" + "testing" + + "github.com/stretchr/testify/require" +) + +func TestOsNewWithRelativePath(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p, err := New(".") + require.NoError(t, err) + require.Equal(t, wd, p.Native()) +} + +func TestOsPathParent(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p := MustNew(wd) + require.NotNil(t, p) + + // Traverse all the way to the root. + for { + q := p.Parent() + if q == nil { + // Parent returns nil when it is the root. + break + } + + p = q + } + + // We should have reached the root. + if runtime.GOOS == "windows" { + require.Equal(t, filepath.VolumeName(wd)+`\`, p.Native()) + } else { + require.Equal(t, "/", p.Native()) + } +} + +func TestOsPathNative(t *testing.T) { + wd, err := os.Getwd() + require.NoError(t, err) + + p := MustNew(wd) + require.NotNil(t, p) + require.Equal(t, wd, p.Native()) +} diff --git a/libs/vfs/path.go b/libs/vfs/path.go new file mode 100644 index 000000000..19f119d50 --- /dev/null +++ b/libs/vfs/path.go @@ -0,0 +1,29 @@ +package vfs + +import "io/fs" + +// FS combines the fs.FS, fs.StatFS, fs.ReadDirFS, and fs.ReadFileFS interfaces. +// It mandates that Path implementations must support all these interfaces. +type FS interface { + fs.FS + fs.StatFS + fs.ReadDirFS + fs.ReadFileFS +} + +// Path defines a read-only virtual file system interface for: +// +// 1. Intercepting file operations to inject custom logic (e.g., logging, access control). +// 2. 
Traversing directories to find specific leaf directories (e.g., .git). +// 3. Converting virtual paths to OS-native paths. +// +// Options 2 and 3 are not possible with the standard fs.FS interface. +// They are needed such that we can provide an instance to the sync package +// and still detect the containing .git directory and convert paths to native paths. +type Path interface { + FS + + Parent() Path + + Native() string +} diff --git a/libs/vfs/path_test.go b/libs/vfs/path_test.go new file mode 100644 index 000000000..54c60940e --- /dev/null +++ b/libs/vfs/path_test.go @@ -0,0 +1 @@ +package vfs diff --git a/main.go b/main.go index 8c8516d9d..c568e6adb 100644 --- a/main.go +++ b/main.go @@ -2,11 +2,16 @@ package main import ( "context" + "os" "github.com/databricks/cli/cmd" "github.com/databricks/cli/cmd/root" ) func main() { - root.Execute(cmd.New(context.Background())) + ctx := context.Background() + err := root.Execute(ctx, cmd.New(ctx)) + if err != nil { + os.Exit(1) + } }
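The new `libs/vfs` package above exposes a read-only `Path` that can be traversed upward with `Parent()` and converted back to an OS-native path with `Native()`. A minimal usage sketch follows; it is not part of this change and only relies on the `MustNew` and `FindLeafInTree` functions added above:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/libs/vfs"
)

func main() {
	// Wrap the current working directory in a read-only vfs.Path.
	p := vfs.MustNew(".")

	// Walk up the directory tree until a directory containing ".git" is found.
	root, err := vfs.FindLeafInTree(p, ".git")
	if err != nil {
		fmt.Println("no .git directory found:", err)
		return
	}

	// Native converts the virtual path back to an OS-native path.
	fmt.Println("repository root:", root.Native())
}
```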