mirror of https://github.com/databricks/cli.git
Merge remote-tracking branch 'origin' into init-all
commit aabb574749
@@ -18,6 +18,13 @@ var accountCmd = &cobra.Command{
 func init() {
 	root.RootCmd.AddCommand(accountCmd)
-	{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) }}
-	accountCmd.AddCommand({{.SnakeName}}.Cmd){{end}}{{end}}{{end}}
+	{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	accountCmd.AddCommand({{.SnakeName}}.Cmd)
+	{{end}}{{end}}{{end}}
+
+	// Register commands with groups
+	{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	{{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
+	{{end}}{{end}}{{end}}
 }
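Both init() templates in this diff attach each generated service command to its parent command and tag it with a GroupID derived from the OpenAPI package name. The following is a minimal, self-contained sketch of the cobra pattern the expanded code follows; the service name "budgets" and group "billing" are illustrative assumptions, not actual generated output:

package main

import "github.com/spf13/cobra"

func main() {
	rootCmd := &cobra.Command{Use: "databricks"}
	accountCmd := &cobra.Command{Use: "account"}
	budgetsCmd := &cobra.Command{Use: "budgets"}

	rootCmd.AddCommand(accountCmd)

	// A group must be declared on the parent before a child command
	// can reference it by ID; cobra then sections the help output.
	accountCmd.AddGroup(&cobra.Group{ID: "billing", Title: "Billing"})

	accountCmd.AddCommand(budgetsCmd)
	budgetsCmd.GroupID = "billing"

	_ = rootCmd.Execute()
}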
@@ -1,6 +1,6 @@
 // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
 
-package cmd
+package workspace
 
 {{$excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions"}}
@@ -11,6 +11,12 @@ import (
 )
 
 func init() {
-	{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) }}
-	root.RootCmd.AddCommand({{.SnakeName}}.Cmd){{end}}{{end}}{{end}}
+	{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	root.RootCmd.AddCommand({{.SnakeName}}.Cmd)
+	{{end}}{{end}}{{end}}
+
+	// Register commands with groups
+	{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
+	{{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
+	{{end}}{{end}}{{end}}
 }
@@ -20,17 +20,31 @@ import (
 {{define "service"}}
 var Cmd = &cobra.Command{
 	Use: "{{(.TrimPrefix "account").KebabName}}",
-	{{if .Description -}}
+	{{- if .Description }}
 	Short: `{{.Summary | without "`"}}`,
 	Long: `{{.Comment " " 80 | without "`"}}`,
-	{{- end}}
+	{{- end }}
+	Annotations: map[string]string{
+		"package": "{{ .Package.Name }}",
+	},
+	{{- if .IsPrivatePreview }}
+
+	// This service is being previewed; hide from help output.
+	Hidden: true,
+	{{- end }}
 }
 
+{{- $serviceName := .KebabName -}}
 {{range .Methods}}
+
+{{- $excludes := list "put-secret" -}}
+{{if in $excludes .KebabName }}
+{{continue}}
+{{end}}
 // start {{.KebabName}} command
 
 {{if .Request}}var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
-{{if not .Request.IsOnlyPrimitiveFields}}var {{.CamelName}}Json flags.JsonFlag{{end}}
+var {{.CamelName}}Json flags.JsonFlag
 {{- end}}
 {{if .Wait}}var {{.CamelName}}SkipWait bool
 var {{.CamelName}}Timeout time.Duration{{end}}
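To make the template above concrete, here is a rough sketch of what the service preamble might expand to for a hypothetical service in private preview; the package, command name, annotation value, and summary string are assumptions for illustration, not actual generated output:

package clusterpolicies

import "github.com/spf13/cobra"

// Hypothetical expansion of the "service" template: the command carries
// the OpenAPI package name as an annotation, and private-preview
// services are hidden from help output.
var Cmd = &cobra.Command{
	Use:   "cluster-policies",
	Short: `Cluster policies limit the ability to configure clusters based on a set of rules.`,
	Annotations: map[string]string{
		"package": "compute",
	},
	// This service is being previewed; hide from help output.
	Hidden: true,
}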
@@ -42,99 +56,132 @@ func init() {
 	{{.CamelName}}Cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
 	{{end -}}
 	{{if .Request}}// TODO: short flags
-	{{if not .Request.IsOnlyPrimitiveFields}}{{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`){{end}}
+	{{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
 	{{$method := .}}
+	{{ if not .IsJsonOnly }}
 	{{range .Request.Fields -}}
 		{{- if not .Required -}}
 		{{if .Entity.IsObject }}// TODO: complex arg: {{.Name}}
 		{{else if .Entity.IsAny }}// TODO: any: {{.Name}}
 		{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
 		{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
 		{{else if .Entity.IsEmpty }}// TODO: output-only field
 		{{else if .Entity.Enum }}{{$method.CamelName}}Cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`)
 		{{else}}{{$method.CamelName}}Cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
 		{{end}}
 		{{- end -}}
-	{{- end}}
-	{{end}}
+	{{- end}}
+	{{- end}}
+	{{end}}
 }
 
-{{ $hasPosArgs := and .Request (or .Request.IsOnlyPrimitiveFields (eq .PascalName "RunNow")) -}}
+{{- $excludeFromPrompts := list "workspace get-status" -}}
+{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
+{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}
+
+{{ $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
 {{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
-{{- $serviceHasNamedIdMap := and .Service.List .Service.List.NamedIdMap -}}
-{{- $hasIdPrompt := and $hasSinglePosArg $serviceHasNamedIdMap -}}
+{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
+{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
 {{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
+{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
 var {{.CamelName}}Cmd = &cobra.Command{
 	Use: "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}",
-	{{if .Description -}}
+	{{- if .Description }}
 	Short: `{{.Summary | without "`"}}`,
 	Long: `{{.Comment " " 80 | without "`"}}`,
-	{{end}}
-	Annotations: map[string]string{},{{if and (not $hasIdPrompt) $hasPosArgs }}
-	Args: cobra.ExactArgs({{len .Request.RequiredFields}}),{{end}}
+	{{- end }}
+	{{- if .IsPrivatePreview }}
+
+	// This command is being previewed; hide from help output.
+	Hidden: true,
+	{{- end }}
+
+	Annotations: map[string]string{},{{if $hasRequiredArgs }}
+	Args: func(cmd *cobra.Command, args []string) error {
+		check := cobra.ExactArgs({{len .Request.RequiredFields}})
+		if cmd.Flags().Changed("json") {
+			check = cobra.ExactArgs(0)
+		}
+		return check(cmd, args)
+	},{{end}}
 	PreRunE: root.Must{{if .Service.IsAccounts}}Account{{else}}Workspace{{end}}Client,
 	RunE: func(cmd *cobra.Command, args []string) (err error) {
 		ctx := cmd.Context()
 		{{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
-		{{- if .Request -}}
-		{{if $hasIdPrompt}}
-		if len(args) == 0 {
-			names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
-			if err != nil {
-				return err
-			}
-			id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
-			if err != nil {
-				return err
-			}
-			args = append(args, id)
-		}
-		if len(args) != 1 {
-			return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
-		}{{end}}{{if not .Request.IsOnlyPrimitiveFields}}
-		err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
-		if err != nil {
-			return err
-		}
-		{{- end -}}
-		{{$method := .}}
-		{{- range $arg, $field := .Request.RequiredFields}}
-		{{if not $field.Entity.IsString -}}
-		_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
-		if err != nil {
-			return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
-		}{{else -}}
-		{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
-		{{- end -}}{{end}}
+		{{- if .Request }}
+		if cmd.Flags().Changed("json") {
+			err = {{.CamelName}}Json.Unmarshal(&{{.CamelName}}Req)
+			if err != nil {
+				return err
+			}
+		} else {
+			{{- if $hasIdPrompt}}
+			if len(args) == 0 {
+				promptSpinner := cmdio.Spinner(ctx)
+				promptSpinner <- "No{{range .Request.RequiredFields}} {{.ConstantName}}{{end}} argument specified. Loading names for {{.Service.TitleName}} drop-down."
+				names, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{(.Service.TrimPrefix "account").PascalName}}.{{.Service.List.NamedIdMap.PascalName}}(ctx{{if .Service.List.Request}}, {{.Service.Package.Name}}.{{.Service.List.Request.PascalName}}{}{{end}})
+				close(promptSpinner)
+				if err != nil {
+					return fmt.Errorf("failed to load names for {{.Service.TitleName}} drop-down. Please manually specify required arguments. Original error: %w", err)
+				}
+				id, err := cmdio.Select(ctx, names, "{{range .Request.RequiredFields}}{{.Summary | trimSuffix "."}}{{end}}")
+				if err != nil {
+					return err
+				}
+				args = append(args, id)
+			}
+			if len(args) != 1 {
+				return fmt.Errorf("expected to have {{range .Request.RequiredFields}}{{.Summary | trimSuffix "." | lower}}{{end}}")
+			}
+			{{- end -}}
+			{{$method := .}}
+			{{- if and .Request.IsAllRequiredFieldsPrimitive (not .IsJsonOnly) -}}
+			{{- range $arg, $field := .Request.RequiredFields}}
+			{{if not $field.Entity.IsString -}}
+			_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
+			if err != nil {
+				return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
+			}{{else -}}
+			{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
+			{{- end -}}{{end}}
+			{{- else -}}
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+			{{- end -}}
+		}
 		{{end}}
 		{{if $wait -}}
+		wait, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}(ctx{{if .Request}}, {{.CamelName}}Req{{end}})
+		if err != nil {
+			return err
+		}
 		if {{.CamelName}}SkipWait {
-			{{template "method-call" .}}
+			{{if .Response -}}
+			return cmdio.Render(ctx, wait.Response)
+			{{- else -}}
+			return nil
+			{{- end}}
 		}
 		spinner := cmdio.Spinner(ctx)
-		info, err := {{if .Service.IsAccounts}}a{{else}}w{{end}}.{{.Service.PascalName}}.{{.PascalName}}AndWait(ctx{{if .Request}}, {{.CamelName}}Req{{end}},
-			retries.Timeout[{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}]({{.CamelName}}Timeout),
-			func(i *retries.Info[{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}]) {
-				if i.Info == nil {
-					return
-				}
-				{{if .Wait.MessagePath -}}
-				{{if .Wait.ComplexMessagePath -}}
-				if i.Info.{{.Wait.MessagePathHead.PascalName}} == nil {
-					return
-				}
-				status := i.Info{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
-				statusMessage := fmt.Sprintf("current status: %s", status)
-				if i.Info.{{.Wait.MessagePathHead.PascalName}} != nil {
-					statusMessage = i.Info{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
-				}
-				{{- else -}}
-				statusMessage := i.Info{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
-				{{- end}}
-				{{- else -}}
-				status := i.Info{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
-				statusMessage := fmt.Sprintf("current status: %s", status)
-				{{- end}}
-				spinner <- statusMessage
-			})
+		info, err := wait.OnProgress(func(i *{{.Service.Package.Name}}.{{.Wait.Poll.Response.PascalName}}) {
+			{{if .Wait.MessagePath -}}
+			{{if .Wait.ComplexMessagePath -}}
+			if i.{{.Wait.MessagePathHead.PascalName}} == nil {
+				return
+			}
+			status := i{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
+			statusMessage := fmt.Sprintf("current status: %s", status)
+			if i.{{.Wait.MessagePathHead.PascalName}} != nil {
+				statusMessage = i{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
+			}
+			{{- else -}}
+			statusMessage := i{{range .Wait.MessagePath}}.{{.PascalName}}{{end}}
+			{{- end}}
+			{{- else -}}
+			status := i{{range .Wait.StatusPath}}.{{.PascalName}}{{end}}
+			statusMessage := fmt.Sprintf("current status: %s", status)
+			{{- end}}
+			spinner <- statusMessage
+		}).GetWithTimeout({{.CamelName}}Timeout)
 		close(spinner)
 		if err != nil {
 			return err
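The generated Args hook above replaces the fixed cobra.ExactArgs check: positional arguments are required unless the request body is supplied via --json. A minimal runnable sketch of that behavior (the command and flag wiring here are illustrative):

package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

// newCreateCmd builds a command with one required positional argument.
// The Args hook mirrors the generated code: exactly one argument is
// required, unless the request body is passed with --json, in which
// case zero positional arguments are expected.
func newCreateCmd() *cobra.Command {
	var jsonBody string
	cmd := &cobra.Command{
		Use: "create NAME",
		Args: func(cmd *cobra.Command, args []string) error {
			check := cobra.ExactArgs(1)
			if cmd.Flags().Changed("json") {
				check = cobra.ExactArgs(0)
			}
			return check(cmd, args)
		},
		RunE: func(cmd *cobra.Command, args []string) error {
			fmt.Println("args:", args, "json:", jsonBody)
			return nil
		},
	}
	cmd.Flags().StringVar(&jsonBody, "json", "", "inline JSON string or @path/to/file.json with request body")
	return cmd
}

func main() {
	_ = newCreateCmd().Execute()
}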
@@ -144,6 +191,9 @@ var {{.CamelName}}Cmd = &cobra.Command{
 		{{template "method-call" .}}
 		{{end -}}
 	},
+	// Disable completions since they are not applicable.
+	// Can be overridden by manual implementation in `override.go`.
+	ValidArgsFunction: cobra.NoFileCompletions,
 }
 {{end}}
 // end service {{.Name}}{{end}}
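The wait-handling change in this template swaps retries.Timeout-based polling for a wait handle with OnProgress(...) chained into GetWithTimeout(...). Below is a simplified, self-contained sketch of that handle shape; the real types come from the Databricks Go SDK and differ in detail:

package main

import (
	"errors"
	"fmt"
	"time"
)

// Info stands in for a poll response carrying a status and message.
type Info struct {
	State         string
	StatusMessage string
}

// Wait mimics the handle returned by a long-running service call:
// OnProgress registers a callback invoked on every poll, and
// GetWithTimeout blocks until a terminal state or the deadline.
type Wait struct {
	poll     func() Info
	progress func(*Info)
}

func (w *Wait) OnProgress(f func(*Info)) *Wait {
	w.progress = f
	return w
}

func (w *Wait) GetWithTimeout(timeout time.Duration) (*Info, error) {
	deadline := time.Now().Add(timeout)
	for time.Now().Before(deadline) {
		i := w.poll()
		if w.progress != nil {
			w.progress(&i)
		}
		if i.State == "RUNNING" {
			return &i, nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return nil, errors.New("timed out waiting for RUNNING state")
}

func main() {
	start := time.Now()
	w := &Wait{poll: func() Info {
		if time.Since(start) > 300*time.Millisecond {
			return Info{State: "RUNNING", StatusMessage: "ready"}
		}
		return Info{State: "PENDING", StatusMessage: "starting"}
	}}
	info, err := w.OnProgress(func(i *Info) {
		fmt.Println("current status:", i.State, "-", i.StatusMessage)
	}).GetWithTimeout(5 * time.Second)
	fmt.Println(info, err)
}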
.gitattributes

@@ -1,3 +1,4 @@
 cmd/account/access-control/access-control.go linguist-generated=true
 cmd/account/billable-usage/billable-usage.go linguist-generated=true
 cmd/account/budgets/budgets.go linguist-generated=true
+cmd/account/cmd.go linguist-generated=true

@@ -13,7 +14,9 @@ cmd/account/networks/networks.go linguist-generated=true
 cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true
 cmd/account/private-access/private-access.go linguist-generated=true
 cmd/account/published-app-integration/published-app-integration.go linguist-generated=true
 cmd/account/service-principal-secrets/service-principal-secrets.go linguist-generated=true
 cmd/account/service-principals/service-principals.go linguist-generated=true
 cmd/account/settings/settings.go linguist-generated=true
 cmd/account/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/account/storage/storage.go linguist-generated=true
 cmd/account/users/users.go linguist-generated=true

@@ -25,6 +28,7 @@ cmd/workspace/catalogs/catalogs.go linguist-generated=true
 cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
 cmd/workspace/clusters/clusters.go linguist-generated=true
+cmd/workspace/cmd.go linguist-generated=true
 cmd/workspace/connections/connections.go linguist-generated=true
 cmd/workspace/current-user/current-user.go linguist-generated=true
 cmd/workspace/dashboards/dashboards.go linguist-generated=true
 cmd/workspace/data-sources/data-sources.go linguist-generated=true

@@ -57,6 +61,7 @@ cmd/workspace/service-principals/service-principals.go linguist-generated=true
 cmd/workspace/serving-endpoints/serving-endpoints.go linguist-generated=true
 cmd/workspace/shares/shares.go linguist-generated=true
 cmd/workspace/storage-credentials/storage-credentials.go linguist-generated=true
 cmd/workspace/system-schemas/system-schemas.go linguist-generated=true
 cmd/workspace/table-constraints/table-constraints.go linguist-generated=true
 cmd/workspace/tables/tables.go linguist-generated=true
 cmd/workspace/token-management/token-management.go linguist-generated=true

@@ -64,5 +69,6 @@ cmd/workspace/tokens/tokens.go linguist-generated=true
 cmd/workspace/users/users.go linguist-generated=true
 cmd/workspace/volumes/volumes.go linguist-generated=true
 cmd/workspace/warehouses/warehouses.go linguist-generated=true
 cmd/workspace/workspace-bindings/workspace-bindings.go linguist-generated=true
 cmd/workspace/workspace-conf/workspace-conf.go linguist-generated=true
 cmd/workspace/workspace/workspace.go linguist-generated=true
CHANGELOG.md (103 changed lines)
@@ -1,5 +1,108 @@
 # Version changelog
 
+## 0.200.0
+
+This version marks the first version available as public preview.
+
+The minor bump to 200 better disambiguates between Databricks CLI "v1" (the Python version)
+and this version, Databricks CLI "v2". The minor version of 0.100 may look lower than 0.17
+to some, whereas 200 does not. This bump has no other significance.
+
+CLI:
+* Add filer.Filer implementation backed by the Files API ([#474](https://github.com/databricks/cli/pull/474)).
+* Add fs cp command ([#463](https://github.com/databricks/cli/pull/463)).
+* Correctly set ExactArgs if generated command has positional arguments ([#488](https://github.com/databricks/cli/pull/488)).
+* Do not use white color as string output ([#489](https://github.com/databricks/cli/pull/489)).
+* Update README to reflect public preview status ([#491](https://github.com/databricks/cli/pull/491)).
+
+Bundles:
+* Fix force flag not working for bundle destroy ([#434](https://github.com/databricks/cli/pull/434)).
+* Fix locker unlock for destroy ([#492](https://github.com/databricks/cli/pull/492)).
+* Use better error assertions and clean up locker API ([#490](https://github.com/databricks/cli/pull/490)).
+
+Dependencies:
+* Bump golang.org/x/mod from 0.10.0 to 0.11.0 ([#496](https://github.com/databricks/cli/pull/496)).
+* Bump golang.org/x/sync from 0.2.0 to 0.3.0 ([#495](https://github.com/databricks/cli/pull/495)).
+
+## 0.100.4
+
+CLI:
+* Add workspace import-dir command ([#456](https://github.com/databricks/cli/pull/456)).
+* Annotate generated commands with OpenAPI package name ([#466](https://github.com/databricks/cli/pull/466)).
+* Associate generated commands with command groups ([#475](https://github.com/databricks/cli/pull/475)).
+* Disable shell completions for generated commands ([#483](https://github.com/databricks/cli/pull/483)).
+* Include [DEFAULT] section header when writing ~/.databrickscfg ([#464](https://github.com/databricks/cli/pull/464)).
+* Pass through proxy related environment variables ([#465](https://github.com/databricks/cli/pull/465)).
+* Restore flags to original values on test completion ([#470](https://github.com/databricks/cli/pull/470)).
+* Update configure command ([#482](https://github.com/databricks/cli/pull/482)).
+
+Dependencies:
+* Bump SDK to latest ([#473](https://github.com/databricks/cli/pull/473)).
+
+## 0.100.3
+
+CLI:
+* Add directory tracking to sync ([#425](https://github.com/databricks/cli/pull/425)).
+* Add fs cat command for dbfs files ([#430](https://github.com/databricks/cli/pull/430)).
+* Add fs ls command for dbfs ([#429](https://github.com/databricks/cli/pull/429)).
+* Add fs mkdirs command for dbfs ([#432](https://github.com/databricks/cli/pull/432)).
+* Add fs rm command for dbfs ([#433](https://github.com/databricks/cli/pull/433)).
+* Add installation instructions ([#458](https://github.com/databricks/cli/pull/458)).
+* Add new line to cmdio JSON rendering ([#443](https://github.com/databricks/cli/pull/443)).
+* Add profile on `databricks auth login` ([#423](https://github.com/databricks/cli/pull/423)).
+* Add readable console logger ([#370](https://github.com/databricks/cli/pull/370)).
+* Add workspace export-dir command ([#449](https://github.com/databricks/cli/pull/449)).
+* Added secrets input prompt for secrets put-secret command ([#413](https://github.com/databricks/cli/pull/413)).
+* Added spinner when loading command prompts ([#420](https://github.com/databricks/cli/pull/420)).
+* Better error message if can not load prompts ([#437](https://github.com/databricks/cli/pull/437)).
+* Changed service template to correctly handle required positional arguments ([#405](https://github.com/databricks/cli/pull/405)).
+* Do not generate prompts for certain commands ([#438](https://github.com/databricks/cli/pull/438)).
+* Do not prompt for List methods ([#411](https://github.com/databricks/cli/pull/411)).
+* Do not use FgWhite and FgBlack for terminal output ([#435](https://github.com/databricks/cli/pull/435)).
+* Skip path translation of job task for jobs with a Git source ([#404](https://github.com/databricks/cli/pull/404)).
+* Tweak profile prompt ([#454](https://github.com/databricks/cli/pull/454)).
+* Update with the latest Go SDK ([#457](https://github.com/databricks/cli/pull/457)).
+* Use cmdio in version command for `--output` flag ([#419](https://github.com/databricks/cli/pull/419)).
+
+Bundles:
+* Check for nil environment before accessing it ([#453](https://github.com/databricks/cli/pull/453)).
+
+Dependencies:
+* Bump github.com/hashicorp/terraform-json from 0.16.0 to 0.17.0 ([#459](https://github.com/databricks/cli/pull/459)).
+* Bump github.com/mattn/go-isatty from 0.0.18 to 0.0.19 ([#412](https://github.com/databricks/cli/pull/412)).
+
+Internal:
+* Add Mkdir and ReadDir functions to filer.Filer interface ([#414](https://github.com/databricks/cli/pull/414)).
+* Add Stat function to filer.Filer interface ([#421](https://github.com/databricks/cli/pull/421)).
+* Add check for path is a directory in filer.ReadDir ([#426](https://github.com/databricks/cli/pull/426)).
+* Add fs.FS adapter for the filer interface ([#422](https://github.com/databricks/cli/pull/422)).
+* Add implementation of filer.Filer for local filesystem ([#460](https://github.com/databricks/cli/pull/460)).
+* Allow equivalence checking of filer errors to fs errors ([#416](https://github.com/databricks/cli/pull/416)).
+* Fix locker integration test ([#417](https://github.com/databricks/cli/pull/417)).
+* Implement DBFS filer ([#139](https://github.com/databricks/cli/pull/139)).
+* Include recursive deletion in filer interface ([#442](https://github.com/databricks/cli/pull/442)).
+* Make filer.Filer return fs.DirEntry from ReadDir ([#415](https://github.com/databricks/cli/pull/415)).
+* Speed up sync integration tests ([#428](https://github.com/databricks/cli/pull/428)).
+
+## 0.100.2
+
+CLI:
+* Reduce parallelism in locker integration test ([#407](https://github.com/databricks/bricks/pull/407)).
+
+Bundles:
+* Don't pass synthesized TMPDIR if not already set ([#409](https://github.com/databricks/bricks/pull/409)).
+* Added support for bundle.Seq, simplified Mutator.Apply interface ([#403](https://github.com/databricks/bricks/pull/403)).
+* Regenerated internal schema structs based on Terraform provider schemas ([#401](https://github.com/databricks/bricks/pull/401)).
+
+## 0.100.1
+
+CLI:
+* Sync: Gracefully handle broken notebook files ([#398](https://github.com/databricks/cli/pull/398)).
+* Add version flag to print version and exit ([#394](https://github.com/databricks/cli/pull/394)).
+* Pass temporary directory environment variables to subprocesses ([#395](https://github.com/databricks/cli/pull/395)).
+* Rename environment variables `BRICKS_` -> `DATABRICKS_` ([#393](https://github.com/databricks/cli/pull/393)).
+* Update to Go SDK v0.9.0 ([#396](https://github.com/databricks/cli/pull/396)).
+
 ## 0.100.0
 
 This release bumps the minor version to 100 to disambiguate between Databricks CLI "v1" (the Python version)
LICENSE (new file)

@@ -0,0 +1,25 @@
DB license

Copyright (2022) Databricks, Inc.

Definitions.

Agreement: The agreement between Databricks, Inc., and you governing the use of the Databricks Services, which shall be, with respect to Databricks, the Databricks Terms of Service located at www.databricks.com/termsofservice, and with respect to Databricks Community Edition, the Community Edition Terms of Service located at www.databricks.com/ce-termsofuse, in each case unless you have entered into a separate written agreement with Databricks governing the use of the applicable Databricks Services.

Software: The source code and object code to which this license applies.

Scope of Use. You may not use this Software except in connection with your use of the Databricks Services pursuant to the Agreement. Your use of the Software must comply at all times with any restrictions applicable to the Databricks Services, generally, and must be used in accordance with any applicable documentation. You may view, use, copy, modify, publish, and/or distribute the Software solely for the purposes of using the code within or connecting to the Databricks Services. If you do not agree to these terms, you may not view, use, copy, modify, publish, and/or distribute the Software.

Redistribution. You may redistribute and sublicense the Software so long as all use is in compliance with these terms. In addition:

You must give any other recipients a copy of this License;
You must cause any modified files to carry prominent notices stating that you changed the files;
You must retain, in the source code form of any derivative works that you distribute, all copyright, patent, trademark, and attribution notices from the source code form, excluding those notices that do not pertain to any part of the derivative works; and
If the source code form includes a "NOTICE" text file as part of its distribution, then any derivative works that you distribute must include a readable copy of the attribution notices contained within such NOTICE file, excluding those notices that do not pertain to any part of the derivative works.

You may add your own copyright statement to your modifications and may provide additional license terms and conditions for use, reproduction, or distribution of your modifications, or for any such derivative works as a whole, provided your use, reproduction, and distribution of the Software otherwise complies with the conditions stated in this License.

Termination. This license terminates automatically upon your breach of these terms or upon the termination of your Agreement. Additionally, Databricks may terminate this license at any time on notice. Upon termination, you must permanently delete the Software and all copies thereof.

DISCLAIMER; LIMITATION OF LIABILITY.

THE SOFTWARE IS PROVIDED “AS-IS” AND WITH ALL FAULTS. DATABRICKS, ON BEHALF OF ITSELF AND ITS LICENSORS, SPECIFICALLY DISCLAIMS ALL WARRANTIES RELATING TO THE SOURCE CODE, EXPRESS AND IMPLIED, INCLUDING, WITHOUT LIMITATION, IMPLIED WARRANTIES, CONDITIONS AND OTHER TERMS OF MERCHANTABILITY, SATISFACTORY QUALITY OR FITNESS FOR A PARTICULAR PURPOSE, AND NON-INFRINGEMENT. DATABRICKS AND ITS LICENSORS TOTAL AGGREGATE LIABILITY RELATING TO OR ARISING OUT OF YOUR USE OF OR DATABRICKS’ PROVISIONING OF THE SOURCE CODE SHALL BE LIMITED TO ONE THOUSAND ($1,000) DOLLARS. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
NOTICE (new file)

@@ -0,0 +1,100 @@
Copyright (2023) Databricks, Inc.

This Software includes software developed at Databricks (https://www.databricks.com/) and its use is subject to the included LICENSE file.

This Software contains code from the following open source projects, licensed under the Apache 2.0 license:

spf13/cobra - https://github.com/spf13/cobra
Copyright cobra authors
License - https://github.com/spf13/cobra/blob/main/LICENSE.txt

briandowns/spinner - https://github.com/briandowns/spinner
Copyright 2022 Brian J. Downs
License - https://github.com/briandowns/spinner/blob/master/LICENSE

go-ini/ini - https://github.com/go-ini/ini
Copyright ini authors
License - https://github.com/go-ini/ini/blob/main/LICENSE

---

This software contains code from the following open source projects, licensed under the MPL 2.0 license:

hashicorp/go-version - https://github.com/hashicorp/go-version
Copyright 2014 HashiCorp, Inc.
License - https://github.com/hashicorp/go-version/blob/main/LICENSE

hashicorp/hc-install - https://github.com/hashicorp/hc-install
Copyright 2020 HashiCorp, Inc.
License - https://github.com/hashicorp/hc-install/blob/main/LICENSE

hashicorp/terraform-exec - https://github.com/hashicorp/terraform-exec
Copyright 2020 HashiCorp, Inc.
License - https://github.com/hashicorp/terraform-exec/blob/main/LICENSE

hashicorp/terraform-json - https://github.com/hashicorp/terraform-json
Copyright 2019 HashiCorp, Inc.
License - https://github.com/hashicorp/terraform-json/blob/main/LICENSE

---

This software contains code from the following open source projects, licensed under the BSD (2-clause) license:

pkg/browser - https://github.com/pkg/browser
Copyright (c) 2014, Dave Cheney <dave@cheney.net>
License - https://github.com/pkg/browser/blob/master/LICENSE

---

This software contains code from the following open source projects, licensed under the BSD (3-clause) license:

spf13/pflag - https://github.com/spf13/pflag
Copyright (c) 2012 Alex Ogier. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
License - https://raw.githubusercontent.com/spf13/pflag/master/LICENSE

google/uuid - https://github.com/google/uuid
Copyright (c) 2009,2014 Google Inc. All rights reserved.
License - https://github.com/google/uuid/blob/master/LICENSE

imdario/mergo - https://github.com/imdario/mergo
Copyright (c) 2013 Dario Castañé. All rights reserved.
Copyright (c) 2012 The Go Authors. All rights reserved.
License - https://github.com/imdario/mergo/blob/master/LICENSE

manifoldco/promptui - https://github.com/manifoldco/promptui
Copyright (c) 2017, Arigato Machine Inc. All rights reserved.
License - https://github.com/manifoldco/promptui/blob/master/LICENSE.md

---

This Software contains code from the following open source projects, licensed under the MIT license:

fatih/color - https://github.com/fatih/color
Copyright (c) 2013 Fatih Arslan
License - https://github.com/fatih/color/blob/main/LICENSE.md

ghodss/yaml - https://github.com/ghodss/yaml
Copyright (c) 2014 Sam Ghods
License - https://github.com/ghodss/yaml/blob/master/LICENSE

mattn/go-isatty - https://github.com/mattn/go-isatty
Copyright (c) Yasuhiro MATSUMOTO <mattn.jp@gmail.com>
License - https://github.com/mattn/go-isatty/blob/master/LICENSE

nwidger/jsoncolor - https://github.com/nwidger/jsoncolor
Copyright (c) 2016 Niels Widger
License - https://github.com/nwidger/jsoncolor/blob/master/LICENSE

sabhiram/go-gitignore - https://github.com/sabhiram/go-gitignore
Copyright (c) 2015 Shaba Abhiram
License - https://github.com/sabhiram/go-gitignore/blob/master/LICENSE

stretchr/testify - https://github.com/stretchr/testify
Copyright (c) 2012-2020 Mat Ryer, Tyler Bunnell and contributors.
License - https://github.com/stretchr/testify/blob/master/LICENSE

whilp/git-urls - https://github.com/whilp/git-urls
Copyright (c) 2020 Will Maier
License - https://github.com/whilp/git-urls/blob/master/LICENSE
README.md (11 changed lines)

@@ -2,19 +2,18 @@
 [![build](https://github.com/databricks/cli/workflows/build/badge.svg?branch=main)](https://github.com/databricks/cli/actions?query=workflow%3Abuild+branch%3Amain)
 
-This project is in private preview.
+This project is in public preview.
 
 Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md).
 
-Documentation is available at https://docs.databricks.com/dev-tools/cli/bricks-cli.html.
+Documentation is available at https://docs.databricks.com/dev-tools/cli/databricks-cli.html.
 
 ## Installation
 
-This CLI is packaged as a dependency-free binary executable and may be located in any directory.
-
-For convenient access, copy the `databricks` binary to any directory listed in `$PATH`.
-
-Confirm the binary works by executing `databricks version`.
+See https://github.com/databricks/cli/releases for releases and
+[the docs pages](https://docs.databricks.com/dev-tools/cli/databricks-cli.html) for
+installation instructions.
 
 ## Authentication
@@ -20,7 +20,7 @@ func (m *all) Name() string {
 	return fmt.Sprintf("artifacts.%sAll", m.name)
 }
 
-func (m *all) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *all) Apply(ctx context.Context, b *bundle.Bundle) error {
 	var out []bundle.Mutator
 
 	// Iterate with stable ordering.

@@ -30,12 +30,12 @@ func (m *all) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, er
 	for _, name := range keys {
 		m, err := m.fn(name)
 		if err != nil {
-			return nil, err
+			return err
 		}
 		if m != nil {
 			out = append(out, m)
 		}
 	}
 
-	return out, nil
+	return bundle.Apply(ctx, b, bundle.Seq(out...))
 }
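Starting with this file, every bundle hunk below applies the same interface change: Mutator.Apply no longer returns follow-up mutators, and composition moves into bundle.Seq combined with a direct bundle.Apply call (the changelog entry "Added support for bundle.Seq, simplified Mutator.Apply interface" refers to this). A simplified sketch of the resulting shape, not the actual bundle package:

package sketch

import "context"

// Bundle stands in for the real bundle type.
type Bundle struct{}

// Mutator is the simplified interface after this change: no returned
// follow-up mutators, just an error.
type Mutator interface {
	Name() string
	Apply(ctx context.Context, b *Bundle) error
}

// Apply runs a single mutator against a bundle.
func Apply(ctx context.Context, b *Bundle, m Mutator) error {
	return m.Apply(ctx, b)
}

// Seq composes mutators into one that applies them in order,
// stopping at the first error.
func Seq(ms ...Mutator) Mutator { return seqMutator(ms) }

type seqMutator []Mutator

func (s seqMutator) Name() string { return "seq" }

func (s seqMutator) Apply(ctx context.Context, b *Bundle) error {
	for _, m := range s {
		if err := m.Apply(ctx, b); err != nil {
			return err
		}
	}
	return nil
}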
@@ -27,15 +27,15 @@ func (m *build) Name() string {
 	return fmt.Sprintf("artifacts.Build(%s)", m.name)
 }
 
-func (m *build) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	if artifact.Notebook != nil {
-		return []bundle.Mutator{notebook.Build(m.name)}, nil
+		return bundle.Apply(ctx, b, notebook.Build(m.name))
 	}
 
-	return nil, nil
+	return nil
 }
@@ -27,10 +27,10 @@ func (m *build) Name() string {
 	return fmt.Sprintf("notebook.Build(%s)", m.name)
 }
 
-func (m *build) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *build) Apply(_ context.Context, b *bundle.Bundle) error {
 	a, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	artifact := a.Notebook

@@ -44,35 +44,35 @@ func (m *build) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, er
 	case ".sql":
 		artifact.Language = workspace.LanguageSql
 	default:
-		return nil, fmt.Errorf("invalid notebook extension: %s", ext)
+		return fmt.Errorf("invalid notebook extension: %s", ext)
 	}
 
 	// Open underlying file.
 	f, err := os.Open(filepath.Join(b.Config.Path, artifact.Path))
 	if err != nil {
-		return nil, fmt.Errorf("unable to open artifact file %s: %w", artifact.Path, errors.Unwrap(err))
+		return fmt.Errorf("unable to open artifact file %s: %w", artifact.Path, errors.Unwrap(err))
 	}
 	defer f.Close()
 
 	// Check that the file contains the notebook marker on its first line.
 	ok, err = hasMarker(artifact.Language, f)
 	if err != nil {
-		return nil, fmt.Errorf("unable to read artifact file %s: %s", artifact.Path, errors.Unwrap(err))
+		return fmt.Errorf("unable to read artifact file %s: %s", artifact.Path, errors.Unwrap(err))
 	}
 	if !ok {
-		return nil, fmt.Errorf("notebook marker not found in %s", artifact.Path)
+		return fmt.Errorf("notebook marker not found in %s", artifact.Path)
 	}
 
 	// Check that an artifact path is defined.
 	remotePath := b.Config.Workspace.ArtifactsPath
 	if remotePath == "" {
-		return nil, fmt.Errorf("remote artifact path not configured")
+		return fmt.Errorf("remote artifact path not configured")
 	}
 
 	// Store absolute paths.
 	artifact.LocalPath = filepath.Join(b.Config.Path, artifact.Path)
 	artifact.RemotePath = path.Join(remotePath, stripExtension(artifact.Path))
-	return nil, nil
+	return nil
 }
 
 func stripExtension(path string) string {
@@ -26,35 +26,35 @@ func (m *upload) Name() string {
 	return fmt.Sprintf("notebook.Upload(%s)", m.name)
 }
 
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
 	a, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	artifact := a.Notebook
 	raw, err := os.ReadFile(artifact.LocalPath)
 	if err != nil {
-		return nil, fmt.Errorf("unable to read %s: %w", m.name, errors.Unwrap(err))
+		return fmt.Errorf("unable to read %s: %w", m.name, errors.Unwrap(err))
 	}
 
 	// Make sure target directory exists.
 	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(artifact.RemotePath))
 	if err != nil {
-		return nil, fmt.Errorf("unable to create directory for %s: %w", m.name, err)
+		return fmt.Errorf("unable to create directory for %s: %w", m.name, err)
 	}
 
 	// Import to workspace.
 	err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
 		Path: artifact.RemotePath,
 		Overwrite: true,
-		Format: workspace.ExportFormatSource,
+		Format: workspace.ImportFormatSource,
 		Language: artifact.Language,
 		Content: base64.StdEncoding.EncodeToString(raw),
 	})
 	if err != nil {
-		return nil, fmt.Errorf("unable to import %s: %w", m.name, err)
+		return fmt.Errorf("unable to import %s: %w", m.name, err)
 	}
 
-	return nil, nil
+	return nil
 }
@@ -27,15 +27,15 @@ func (m *upload) Name() string {
 	return fmt.Sprintf("artifacts.Upload(%s)", m.name)
 }
 
-func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
 	artifact, ok := b.Config.Artifacts[m.name]
 	if !ok {
-		return nil, fmt.Errorf("artifact doesn't exist: %s", m.name)
+		return fmt.Errorf("artifact doesn't exist: %s", m.name)
 	}
 
 	if artifact.Notebook != nil {
-		return []bundle.Mutator{notebook.Upload(m.name)}, nil
+		return bundle.Apply(ctx, b, notebook.Upload(m.name))
 	}
 
-	return nil, nil
+	return nil
 }
@@ -91,8 +91,6 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
 	return b.client
 }
 
-var cacheDirName = filepath.Join(".databricks", "bundle")
-
 // CacheDir returns directory to use for temporary files for this bundle.
 // Scoped to the bundle's environment.
 func (b *Bundle) CacheDir(paths ...string) (string, error) {

@@ -100,11 +98,20 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
 		panic("environment not set")
 	}
 
+	cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
+
+	if !exists || cacheDirName == "" {
+		cacheDirName = filepath.Join(
+			// Anchor at bundle root directory.
+			b.Config.Path,
+			// Static cache directory.
+			".databricks",
+			"bundle",
+		)
+	}
+
 	// Fixed components of the result path.
 	parts := []string{
-		// Anchor at bundle root directory.
-		b.Config.Path,
-		// Static cache directory.
 		cacheDirName,
 		// Scope with environment name.
 		b.Config.Bundle.Environment,
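The CacheDir change above makes the cache prefix overridable through the DATABRICKS_BUNDLE_TMP environment variable, falling back to <bundle root>/.databricks/bundle. A standalone sketch of the resulting resolution order (the paths in the example are illustrative):

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// cacheDir mirrors the lookup order introduced above: DATABRICKS_BUNDLE_TMP,
// when set and non-empty, replaces the default <bundle root>/.databricks/bundle
// prefix; the environment name is always appended.
func cacheDir(bundleRoot, environment string) string {
	base, ok := os.LookupEnv("DATABRICKS_BUNDLE_TMP")
	if !ok || base == "" {
		base = filepath.Join(bundleRoot, ".databricks", "bundle")
	}
	return filepath.Join(base, environment)
}

func main() {
	// With the variable unset: /work/mybundle/.databricks/bundle/default
	fmt.Println(cacheDir("/work/mybundle", "default"))

	// With the variable set: /tmp/bundle-tmp/default
	os.Setenv("DATABRICKS_BUNDLE_TMP", "/tmp/bundle-tmp")
	fmt.Println(cacheDir("/work/mybundle", "default"))
}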
@@ -3,7 +3,6 @@ package bundle
 import (
 	"os"
 	"path/filepath"
-	"strings"
 	"testing"
 
 	"github.com/stretchr/testify/assert"

@@ -35,9 +34,38 @@ func TestBundleCacheDir(t *testing.T) {
 	// This is otherwise done by [mutators.SelectEnvironment].
 	bundle.Config.Bundle.Environment = "default"
 
+	// unset env variable in case it's set
+	t.Setenv("DATABRICKS_BUNDLE_TMP", "")
+
 	cacheDir, err := bundle.CacheDir()
+
+	// format is <CWD>/.databricks/bundle/<environment>
 	assert.NoError(t, err)
-	assert.True(t, strings.HasPrefix(cacheDir, projectDir))
+	assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir)
 }
 
+func TestBundleCacheDirOverride(t *testing.T) {
+	projectDir := t.TempDir()
+	bundleTmpDir := t.TempDir()
+	f1, err := os.Create(filepath.Join(projectDir, "bundle.yml"))
+	require.NoError(t, err)
+	f1.Close()
+
+	bundle, err := Load(projectDir)
+	require.NoError(t, err)
+
+	// Artificially set environment.
+	// This is otherwise done by [mutators.SelectEnvironment].
+	bundle.Config.Bundle.Environment = "default"
+
+	// now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle
+	t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir)
+
+	cacheDir, err := bundle.CacheDir()
+
+	// format is <DATABRICKS_BUNDLE_TMP>/<environment>
+	assert.NoError(t, err)
+	assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir)
+}
+
 func TestBundleMustLoadSuccess(t *testing.T) {
@@ -247,6 +247,6 @@ func (m *interpolate) Name() string {
 	return "Interpolate"
 }
 
-func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
-	return nil, m.expand(&b.Config)
+func (m *interpolate) Apply(_ context.Context, b *bundle.Bundle) error {
+	return m.expand(&b.Config)
 }
@@ -24,14 +24,14 @@ func (m *defineDefaultEnvironment) Name() string {
 	return fmt.Sprintf("DefineDefaultEnvironment(%s)", m.name)
 }
 
-func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
 	// Nothing to do if the configuration has at least 1 environment.
 	if len(b.Config.Environments) > 0 {
-		return nil, nil
+		return nil
 	}
 
 	// Define default environment.
 	b.Config.Environments = make(map[string]*config.Environment)
 	b.Config.Environments[m.name] = &config.Environment{}
-	return nil, nil
+	return nil
 }

@@ -13,7 +13,7 @@ import (
 
 func TestDefaultEnvironment(t *testing.T) {
 	bundle := &bundle.Bundle{}
-	_, err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	env, ok := bundle.Config.Environments["default"]
 	assert.True(t, ok)

@@ -28,7 +28,7 @@ func TestDefaultEnvironmentAlreadySpecified(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	_, ok := bundle.Config.Environments["default"]
 	assert.False(t, ok)
@@ -28,9 +28,9 @@ func (m *defineDefaultInclude) Name() string {
 	return "DefineDefaultInclude"
 }
 
-func (m *defineDefaultInclude) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultInclude) Apply(_ context.Context, b *bundle.Bundle) error {
 	if len(b.Config.Include) == 0 {
 		b.Config.Include = slices.Clone(m.include)
 	}
-	return nil, nil
+	return nil
 }

@@ -12,7 +12,7 @@ import (
 
 func TestDefaultInclude(t *testing.T) {
 	bundle := &bundle.Bundle{}
-	_, err := mutator.DefineDefaultInclude().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultInclude().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, []string{"*.yml", "*/*.yml"}, bundle.Config.Include)
 }
@@ -19,10 +19,10 @@ func (m *defineDefaultWorkspacePaths) Name() string {
 	return "DefaultWorkspacePaths"
 }
 
-func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundle) error {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return nil, fmt.Errorf("unable to define default workspace paths: workspace root not defined")
+		return fmt.Errorf("unable to define default workspace paths: workspace root not defined")
 	}
 
 	if b.Config.Workspace.FilesPath == "" {

@@ -37,5 +37,5 @@ func (m *defineDefaultWorkspacePaths) Apply(ctx context.Context, b *bundle.Bundl
 		b.Config.Workspace.StatePath = path.Join(root, "state")
 	}
 
-	return nil, nil
+	return nil
 }

@@ -19,7 +19,7 @@ func TestDefineDefaultWorkspacePaths(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/files", bundle.Config.Workspace.FilesPath)
 	assert.Equal(t, "/artifacts", bundle.Config.Workspace.ArtifactsPath)

@@ -37,7 +37,7 @@ func TestDefineDefaultWorkspacePathsAlreadySet(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultWorkspacePaths().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/foo/bar", bundle.Config.Workspace.FilesPath)
 	assert.Equal(t, "/foo/bar", bundle.Config.Workspace.ArtifactsPath)
@@ -18,17 +18,17 @@ func (m *defineDefaultWorkspaceRoot) Name() string {
 	return "DefineDefaultWorkspaceRoot"
 }
 
-func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
 	if b.Config.Workspace.RootPath != "" {
-		return nil, nil
+		return nil
 	}
 
 	if b.Config.Bundle.Name == "" {
-		return nil, fmt.Errorf("unable to define default workspace root: bundle name not defined")
+		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
 	}
 
 	if b.Config.Bundle.Environment == "" {
-		return nil, fmt.Errorf("unable to define default workspace root: bundle environment not selected")
+		return fmt.Errorf("unable to define default workspace root: bundle environment not selected")
 	}
 
 	b.Config.Workspace.RootPath = fmt.Sprintf(

@@ -36,5 +36,5 @@ func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle
 		b.Config.Bundle.Name,
 		b.Config.Bundle.Environment,
 	)
-	return nil, nil
+	return nil
 }

@@ -20,7 +20,7 @@ func TestDefaultWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.DefineDefaultWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "~/.bundle/name/environment", bundle.Config.Workspace.RootPath)
 }
@@ -20,15 +20,15 @@ func (m *expandWorkspaceRoot) Name() string {
 	return "ExpandWorkspaceRoot"
 }
 
-func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) error {
 	root := b.Config.Workspace.RootPath
 	if root == "" {
-		return nil, fmt.Errorf("unable to expand workspace root: workspace root not defined")
+		return fmt.Errorf("unable to expand workspace root: workspace root not defined")
 	}
 
 	currentUser := b.Config.Workspace.CurrentUser
 	if currentUser == nil || currentUser.UserName == "" {
-		return nil, fmt.Errorf("unable to expand workspace root: current user not set")
+		return fmt.Errorf("unable to expand workspace root: current user not set")
 	}
 
 	if strings.HasPrefix(root, "~/") {

@@ -36,5 +36,5 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) ([]bu
 		b.Config.Workspace.RootPath = path.Join(home, root[2:])
 	}
 
-	return nil, nil
+	return nil
 }

@@ -23,7 +23,7 @@ func TestExpandWorkspaceRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/Users/jane@doe.com/foo", bundle.Config.Workspace.RootPath)
 }

@@ -39,7 +39,7 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "/Users/charly@doe.com/foo", bundle.Config.Workspace.RootPath)
 }

@@ -54,7 +54,7 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.Error(t, err)
 }

@@ -66,6 +66,6 @@ func TestExpandWorkspaceRootWithoutCurrentUser(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
+	err := mutator.ExpandWorkspaceRoot().Apply(context.Background(), bundle)
 	require.Error(t, err)
 }
@@ -18,11 +18,11 @@ func (m *loadGitDetails) Name() string {
 	return "LoadGitDetails"
 }
 
-func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
 	// Load relevant git repository
 	repo, err := git.NewRepository(b.Config.Path)
 	if err != nil {
-		return nil, err
+		return err
 	}
 	// load branch name if undefined
 	if b.Config.Bundle.Git.Branch == "" {

@@ -47,5 +47,5 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.
 		remoteUrl := repo.OriginUrl()
 		b.Config.Bundle.Git.OriginURL = remoteUrl
 	}
-	return nil, nil
+	return nil
 }
@@ -17,13 +17,13 @@ func (m *populateCurrentUser) Name() string {
 	return "PopulateCurrentUser"
 }
 
-func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
 	w := b.WorkspaceClient()
 	me, err := w.CurrentUser.Me(ctx)
 	if err != nil {
-		return nil, err
+		return err
 	}
 
 	b.Config.Workspace.CurrentUser = me
-	return nil, nil
+	return nil
 }
@@ -25,10 +25,10 @@ func (m *processInclude) Name() string {
 	return fmt.Sprintf("ProcessInclude(%s)", m.relPath)
 }
 
-func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *processInclude) Apply(_ context.Context, b *bundle.Bundle) error {
 	this, err := config.Load(m.fullPath)
 	if err != nil {
-		return nil, err
+		return err
 	}
-	return nil, b.Config.Merge(this)
+	return b.Config.Merge(this)
 }

@@ -32,7 +32,7 @@ func TestProcessInclude(t *testing.T) {
 	f.Close()
 
 	assert.Equal(t, "foo", bundle.Config.Workspace.Host)
-	_, err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle)
+	err = mutator.ProcessInclude(fullPath, relPath).Apply(context.Background(), bundle)
 	require.NoError(t, err)
 	assert.Equal(t, "bar", bundle.Config.Workspace.Host)
 }
@@ -22,7 +22,7 @@ func (m *processRootIncludes) Name() string {
 	return "ProcessRootIncludes"
 }
 
-func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
+func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error {
 	var out []bundle.Mutator
 
 	// Map with files we've already seen to avoid loading them twice.

@@ -40,13 +40,13 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 	for _, entry := range b.Config.Include {
 		// Include paths must be relative.
 		if filepath.IsAbs(entry) {
-			return nil, fmt.Errorf("%s: includes must be relative paths", entry)
+			return fmt.Errorf("%s: includes must be relative paths", entry)
 		}
 
 		// Anchor includes to the bundle root path.
 		matches, err := filepath.Glob(filepath.Join(b.Config.Path, entry))
 		if err != nil {
-			return nil, err
+			return err
 		}
 
 		// Filter matches to ones we haven't seen yet.

@@ -54,7 +54,7 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 		for _, match := range matches {
 			rel, err := filepath.Rel(b.Config.Path, match)
 			if err != nil {
-				return nil, err
+				return err
 			}
 			if _, ok := seen[rel]; ok {
 				continue

@@ -74,5 +74,5 @@ func (m *processRootIncludes) Apply(_ context.Context, b *bundle.Bundle) ([]bund
 	// Swap out the original includes list with the expanded globs.
 	b.Config.Include = files
 
-	return out, nil
+	return bundle.Apply(ctx, b, bundle.Seq(out...))
 }
@@ -26,7 +26,7 @@ func TestProcessRootIncludesEmpty(t *testing.T) {
 			Path: ".",
 		},
 	}
-	_, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 }

@@ -46,7 +46,7 @@ func TestProcessRootIncludesAbs(t *testing.T) {
 			},
 		},
 	}
-	_, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.Error(t, err)
 	assert.Contains(t, err.Error(), "must be relative paths")
 }

@@ -65,17 +65,9 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
 	touch(t, bundle.Config.Path, "a.yml")
 	touch(t, bundle.Config.Path, "b.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
-	var names []string
-	for _, m := range ms {
-		names = append(names, m.Name())
-	}
-
-	assert.NotContains(t, names, "ProcessInclude(bundle.yml)")
-	assert.Contains(t, names, "ProcessInclude(a.yml)")
-	assert.Contains(t, names, "ProcessInclude(b.yml)")
 	assert.Equal(t, []string{"a.yml", "b.yml"}, bundle.Config.Include)
 }

@@ -93,16 +85,9 @@ func TestProcessRootIncludesMultiGlob(t *testing.T) {
 	touch(t, bundle.Config.Path, "a1.yml")
 	touch(t, bundle.Config.Path, "b1.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
 
-	var names []string
-	for _, m := range ms {
-		names = append(names, m.Name())
-	}
-
-	assert.Contains(t, names, "ProcessInclude(a1.yml)")
-	assert.Contains(t, names, "ProcessInclude(b1.yml)")
 	assert.Equal(t, []string{"a1.yml", "b1.yml"}, bundle.Config.Include)
 }

@@ -119,9 +104,7 @@ func TestProcessRootIncludesRemoveDups(t *testing.T) {
 
 	touch(t, bundle.Config.Path, "a.yml")
 
-	ms, err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
+	err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
 	require.NoError(t, err)
-	assert.Len(t, ms, 1)
-	assert.Equal(t, "ProcessInclude(a.yml)", ms[0].Name())
 	assert.Equal(t, []string{"a.yml"}, bundle.Config.Include)
 }
@ -20,15 +20,15 @@ func (m *selectDefaultEnvironment) Name() string {
|
|||
return "SelectDefaultEnvironment"
|
||||
}
|
||||
|
||||
func (m *selectDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
|
||||
func (m *selectDefaultEnvironment) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||
if len(b.Config.Environments) == 0 {
|
||||
return nil, fmt.Errorf("no environments defined")
|
||||
return fmt.Errorf("no environments defined")
|
||||
}
|
||||
|
||||
// One environment means there's only one default.
|
||||
names := maps.Keys(b.Config.Environments)
|
||||
if len(names) == 1 {
|
||||
return []bundle.Mutator{SelectEnvironment(names[0])}, nil
|
||||
return SelectEnvironment(names[0]).Apply(ctx, b)
|
||||
}
|
||||
|
||||
// Multiple environments means we look for the `default` flag.
|
||||
|
@ -41,14 +41,14 @@ func (m *selectDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([
|
|||
|
||||
// It is invalid to have multiple environments with the `default` flag set.
|
||||
if len(defaults) > 1 {
|
||||
return nil, fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
|
||||
return fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
|
||||
}
|
||||
|
||||
// If no environment has the `default` flag set, ask the user to specify one.
|
||||
if len(defaults) == 0 {
|
||||
return nil, fmt.Errorf("please specify environment")
|
||||
return fmt.Errorf("please specify environment")
|
||||
}
|
||||
|
||||
// One default remaining.
|
||||
return []bundle.Mutator{SelectEnvironment(defaults[0])}, nil
|
||||
return SelectEnvironment(defaults[0]).Apply(ctx, b)
|
||||
}
|
||||
|
|
|
@ -16,7 +16,7 @@ func TestSelectDefaultEnvironmentNoEnvironments(t *testing.T) {
|
|||
Environments: map[string]*config.Environment{},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.ErrorContains(t, err, "no environments defined")
|
||||
}
|
||||
|
||||
|
@ -28,10 +28,9 @@ func TestSelectDefaultEnvironmentSingleEnvironments(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
ms, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ms, 1)
|
||||
assert.Equal(t, "SelectEnvironment(foo)", ms[0].Name())
|
||||
assert.Equal(t, "foo", bundle.Config.Bundle.Environment)
|
||||
}
|
||||
|
||||
func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
|
||||
|
@ -44,7 +43,7 @@ func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.ErrorContains(t, err, "please specify environment")
|
||||
}
|
||||
|
||||
|
@ -57,7 +56,7 @@ func TestSelectDefaultEnvironmentNoDefaultsWithNil(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.ErrorContains(t, err, "please specify environment")
|
||||
}
|
||||
|
||||
|
@ -71,7 +70,7 @@ func TestSelectDefaultEnvironmentMultipleDefaults(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.ErrorContains(t, err, "multiple environments are marked as default")
|
||||
}
|
||||
|
||||
|
@ -85,8 +84,7 @@ func TestSelectDefaultEnvironmentSingleDefault(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
ms, err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, ms, 1)
|
||||
assert.Equal(t, "SelectEnvironment(bar)", ms[0].Name())
|
||||
assert.Equal(t, "bar", bundle.Config.Bundle.Environment)
|
||||
}
|
||||
|
|
|
@ -22,21 +22,21 @@ func (m *selectEnvironment) Name() string {
|
|||
return fmt.Sprintf("SelectEnvironment(%s)", m.name)
|
||||
}
|
||||
|
||||
func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
|
||||
func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
|
||||
if b.Config.Environments == nil {
|
||||
return nil, fmt.Errorf("no environments defined")
|
||||
return fmt.Errorf("no environments defined")
|
||||
}
|
||||
|
||||
// Get specified environment
|
||||
env, ok := b.Config.Environments[m.name]
|
||||
if !ok {
|
||||
return nil, fmt.Errorf("%s: no such environment", m.name)
|
||||
return fmt.Errorf("%s: no such environment", m.name)
|
||||
}
|
||||
|
||||
// Merge specified environment into root configuration structure.
|
||||
err := b.Config.MergeEnvironment(env)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
|
||||
// Store specified environment in configuration for reference.
|
||||
|
@ -44,5 +44,5 @@ func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) ([]bundle
|
|||
|
||||
// Clear environments after loading.
|
||||
b.Config.Environments = nil
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
|
|
@ -26,7 +26,7 @@ func TestSelectEnvironment(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
|
||||
err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "bar", bundle.Config.Workspace.Host)
|
||||
}
|
||||
|
@ -39,6 +39,6 @@ func TestSelectEnvironmentNotFound(t *testing.T) {
|
|||
},
|
||||
},
|
||||
}
|
||||
_, err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
|
||||
err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
|
||||
require.Error(t, err, "no environments defined")
|
||||
}
|
||||
|
|
|
@ -52,12 +52,12 @@ func setVariable(v *variable.Variable, name string) error {
|
|||
return fmt.Errorf(`no value assigned to required variable %s. Assignment can be done through the "--var" flag or by setting the %s environment variable`, name, bundleVarPrefix+name)
|
||||
}
|
||||
|
||||
func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
|
||||
func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
|
||||
for name, variable := range b.Config.Variables {
|
||||
err := setVariable(variable, name)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil, nil
|
||||
return nil
|
||||
}
|
||||
|
|
|
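// Illustrative sketch (not from the diff): per the error message in
// setVariable, a required variable can be assigned through the "--var" flag
// or through an environment variable; the prefix is assumed to be
// "BUNDLE_VAR_", matching the test below:
//
//	BUNDLE_VAR_b="env-var-b" databricks bundle deploy
//
// The test below only shows a default-less variable picking up the env var;
// precedence between an explicit default and the environment is not covered
// by these hunks.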
@@ -108,7 +108,7 @@ func TestSetVariablesMutator(t *testing.T) {

	t.Setenv("BUNDLE_VAR_b", "env-var-b")

	_, err := SetVariables().Apply(context.Background(), bundle)
	err := SetVariables().Apply(context.Background(), bundle)
	require.NoError(t, err)
	assert.Equal(t, "default-a", *bundle.Config.Variables["a"].Value)
	assert.Equal(t, "env-var-b", *bundle.Config.Variables["b"].Value)

@@ -145,19 +145,24 @@ func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle,
	return nil
}

func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
	m.seen = make(map[string]string)

	for key, job := range b.Config.Resources.Jobs {
		dir, err := job.ConfigFileDirectory()
		if err != nil {
			return nil, fmt.Errorf("unable to determine directory for job %s: %w", key, err)
			return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
		}

		// Do not translate job task paths if using git source
		if job.GitSource != nil {
			continue
		}

		for i := 0; i < len(job.Tasks); i++ {
			err := m.translateJobTask(dir, b, &job.Tasks[i])
			if err != nil {
				return nil, err
				return err
			}
		}
	}
@@ -165,16 +170,16 @@ func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) ([]bundle.Mu
	for key, pipeline := range b.Config.Resources.Pipelines {
		dir, err := pipeline.ConfigFileDirectory()
		if err != nil {
			return nil, fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
		}

		for i := 0; i < len(pipeline.Libraries); i++ {
			err := m.translatePipelineLibrary(dir, b, &pipeline.Libraries[i])
			if err != nil {
				return nil, err
				return err
			}
		}
	}

	return nil, nil
	return nil
}

@@ -31,6 +31,73 @@ func touchEmptyFile(t *testing.T, path string) {
	f.Close()
}

func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
	dir := t.TempDir()
	bundle := &bundle.Bundle{
		Config: config.Root{
			Path: dir,
			Workspace: config.Workspace{
				FilesPath: "/bundle",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {

						Paths: resources.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						JobSettings: &jobs.JobSettings{
							GitSource: &jobs.GitSource{
								GitBranch:   "somebranch",
								GitCommit:   "somecommit",
								GitProvider: "github",
								GitTag:      "sometag",
								GitUrl:      "https://github.com/someuser/somerepo",
							},
							Tasks: []jobs.JobTaskSettings{
								{
									NotebookTask: &jobs.NotebookTask{
										NotebookPath: "my_job_notebook.py",
									},
								},
								{
									PythonWheelTask: &jobs.PythonWheelTask{
										PackageName: "foo",
									},
								},
								{
									SparkPythonTask: &jobs.SparkPythonTask{
										PythonFile: "my_python_file.py",
									},
								},
							},
						},
					},
				},
			},
		},
	}

	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	require.NoError(t, err)

	assert.Equal(
		t,
		"my_job_notebook.py",
		bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
	)
	assert.Equal(
		t,
		"foo",
		bundle.Config.Resources.Jobs["job"].Tasks[1].PythonWheelTask.PackageName,
	)
	assert.Equal(
		t,
		"my_python_file.py",
		bundle.Config.Resources.Jobs["job"].Tasks[2].SparkPythonTask.PythonFile,
	)
}

func TestTranslatePaths(t *testing.T) {
	dir := t.TempDir()
	touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py"))
@@ -118,7 +185,7 @@ func TestTranslatePaths(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	require.NoError(t, err)

	// Assert that the path in the tasks now refer to the artifact.
@@ -215,7 +282,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	require.NoError(t, err)

	assert.Equal(
@@ -261,7 +328,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	assert.ErrorContains(t, err, "is not contained in bundle root")
}

@@ -292,7 +359,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
}

@@ -323,7 +390,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
}

@@ -354,7 +421,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	assert.EqualError(t, err, "notebook ./doesnt_exist.py not found")
}

@@ -385,6 +452,6 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
		},
	}

	_, err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	err := mutator.TranslatePaths().Apply(context.Background(), bundle)
	assert.EqualError(t, err, "file ./doesnt_exist.py not found")
}

@@ -78,6 +78,9 @@ func (r *Root) SetConfigFilePath(path string) {
	r.Resources.SetConfigFilePath(path)
	if r.Environments != nil {
		for _, env := range r.Environments {
			if env == nil {
				continue
			}
			if env.Resources != nil {
				env.Resources.SetConfigFilePath(path)
			}

@@ -97,9 +97,9 @@ func (w *Workspace) Client() (*databricks.WorkspaceClient, error) {
func init() {
	arg0 := os.Args[0]

	// Configure BRICKS_CLI_PATH only if our caller intends to use this specific version of this binary.
	// Configure DATABRICKS_CLI_PATH only if our caller intends to use this specific version of this binary.
	// Otherwise, if it is equal to its basename, processes can find it in $PATH.
	if arg0 != filepath.Base(arg0) {
		os.Setenv("BRICKS_CLI_PATH", arg0)
		os.Setenv("DATABRICKS_CLI_PATH", arg0)
	}
}

@@ -7,29 +7,27 @@ import (
)

type DeferredMutator struct {
	mutators []Mutator
	finally []Mutator
	mutator Mutator
	finally Mutator
}

func (d *DeferredMutator) Name() string {
	return "deferred"
}

func Defer(mutators []Mutator, finally []Mutator) []Mutator {
	return []Mutator{
		&DeferredMutator{
			mutators: mutators,
			finally: finally,
		},
func Defer(mutator Mutator, finally Mutator) Mutator {
	return &DeferredMutator{
		mutator: mutator,
		finally: finally,
	}
}

func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) ([]Mutator, error) {
	mainErr := Apply(ctx, b, d.mutators)
func (d *DeferredMutator) Apply(ctx context.Context, b *Bundle) error {
	mainErr := Apply(ctx, b, d.mutator)
	errOnFinish := Apply(ctx, b, d.finally)
	if mainErr != nil || errOnFinish != nil {
		return nil, errs.FromMany(mainErr, errOnFinish)
		return errs.FromMany(mainErr, errOnFinish)
	}

	return nil, nil
	return nil
}
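// Illustrative sketch (not from the diff): usage of the new Defer signature,
// mirroring the tests that follow. The finally mutator runs even when the
// main sequence fails, and errs.FromMany combines both errors. Constructor
// names for the phase mutators are assumptions:
//
//	deployPhase := bundle.Defer(
//		bundle.Seq(lock.Acquire(), files.Upload()),
//		lock.Release(lock.GoalDeploy),
//	)
//	err := bundle.Apply(ctx, b, deployPhase)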
@@ -17,9 +17,9 @@ func (t *mutatorWithError) Name() string {
	return "mutatorWithError"
}

func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) ([]Mutator, error) {
func (t *mutatorWithError) Apply(_ context.Context, b *Bundle) error {
	t.applyCalled++
	return nil, fmt.Errorf(t.errorMsg)
	return fmt.Errorf(t.errorMsg)
}

func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
@@ -27,7 +27,7 @@ func TestDeferredMutatorWhenAllMutatorsSucceed(t *testing.T) {
	m2 := &testMutator{}
	m3 := &testMutator{}
	cleanup := &testMutator{}
	deferredMutator := Defer([]Mutator{m1, m2, m3}, []Mutator{cleanup})
	deferredMutator := Defer(Seq(m1, m2, m3), cleanup)

	bundle := &Bundle{}
	err := Apply(context.Background(), bundle, deferredMutator)
@@ -44,7 +44,7 @@ func TestDeferredMutatorWhenFirstFails(t *testing.T) {
	m2 := &testMutator{}
	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
	cleanup := &testMutator{}
	deferredMutator := Defer([]Mutator{mErr, m1, m2}, []Mutator{cleanup})
	deferredMutator := Defer(Seq(mErr, m1, m2), cleanup)

	bundle := &Bundle{}
	err := Apply(context.Background(), bundle, deferredMutator)
@@ -61,7 +61,7 @@ func TestDeferredMutatorWhenMiddleOneFails(t *testing.T) {
	m2 := &testMutator{}
	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
	cleanup := &testMutator{}
	deferredMutator := Defer([]Mutator{m1, mErr, m2}, []Mutator{cleanup})
	deferredMutator := Defer(Seq(m1, mErr, m2), cleanup)

	bundle := &Bundle{}
	err := Apply(context.Background(), bundle, deferredMutator)
@@ -78,7 +78,7 @@ func TestDeferredMutatorWhenLastOneFails(t *testing.T) {
	m2 := &testMutator{}
	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
	cleanup := &testMutator{}
	deferredMutator := Defer([]Mutator{m1, m2, mErr}, []Mutator{cleanup})
	deferredMutator := Defer(Seq(m1, m2, mErr), cleanup)

	bundle := &Bundle{}
	err := Apply(context.Background(), bundle, deferredMutator)
@@ -95,7 +95,7 @@ func TestDeferredMutatorCombinesErrorMessages(t *testing.T) {
	m2 := &testMutator{}
	mErr := &mutatorWithError{errorMsg: "mutator error occurred"}
	cleanupErr := &mutatorWithError{errorMsg: "cleanup error occurred"}
	deferredMutator := Defer([]Mutator{m1, m2, mErr}, []Mutator{cleanupErr})
	deferredMutator := Defer(Seq(m1, m2, mErr), cleanupErr)

	bundle := &Bundle{}
	err := Apply(context.Background(), bundle, deferredMutator)

@@ -16,10 +16,10 @@ func (m *delete) Name() string {
	return "files.Delete"
}

func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error {
	// Do not delete files if terraform destroy was not consented
	if !b.Plan.IsEmpty && !b.Plan.ConfirmApply {
		return nil, nil
		return nil
	}

	cmdio.LogString(ctx, "Starting deletion of remote bundle files")
@@ -29,10 +29,10 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator,
	if !b.AutoApprove {
		proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?: ", b.Config.Workspace.RootPath, red("deleted permanently!")))
		if err != nil {
			return nil, err
			return err
		}
		if !proceed {
			return nil, nil
			return nil
		}
	}

@@ -41,22 +41,22 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator,
		Recursive: true,
	})
	if err != nil {
		return nil, err
		return err
	}

	// Clean up sync snapshot file
	sync, err := getSync(ctx, b)
	if err != nil {
		return nil, err
		return err
	}
	err = sync.DestroySnapshot(ctx)
	if err != nil {
		return nil, err
		return err
	}

	cmdio.LogString(ctx, fmt.Sprintf("Deleted snapshot file at %s", sync.SnapshotPath()))
	cmdio.LogString(ctx, "Successfully deleted files!")
	return nil, nil
	return nil
}

func Delete() bundle.Mutator {

@@ -14,20 +14,20 @@ func (m *upload) Name() string {
	return "files.Upload"
}

func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
	cmdio.LogString(ctx, "Starting upload of bundle files")
	sync, err := getSync(ctx, b)
	if err != nil {
		return nil, err
		return err
	}

	err = sync.RunOnce(ctx)
	if err != nil {
		return nil, err
		return err
	}

	cmdio.LogString(ctx, fmt.Sprintf("Uploaded bundle files at %s!\n", b.Config.Workspace.FilesPath))
	return nil, nil
	return nil
}

func Upload() bundle.Mutator {

@@ -30,16 +30,16 @@ func (m *acquire) init(b *bundle.Bundle) error {
	return nil
}

func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) error {
	// Return early if locking is disabled.
	if !b.Config.Bundle.Lock.IsEnabled() {
		log.Infof(ctx, "Skipping; locking is disabled")
		return nil, nil
		return nil
	}

	err := m.init(b)
	if err != nil {
		return nil, err
		return err
	}

	force := b.Config.Bundle.Lock.Force
@@ -47,8 +47,8 @@ func (m *acquire) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
	err = b.Locker.Lock(ctx, force)
	if err != nil {
		log.Errorf(ctx, "Failed to acquire deployment lock: %v", err)
		return nil, err
		return err
	}

	return nil, nil
	return nil
}

@@ -2,41 +2,53 @@ package lock

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/locker"
	"github.com/databricks/cli/libs/log"
)

type release struct{}
type Goal string

func Release() bundle.Mutator {
	return &release{}
const (
	GoalDeploy  = Goal("deploy")
	GoalDestroy = Goal("destroy")
)

type release struct {
	goal Goal
}

func Release(goal Goal) bundle.Mutator {
	return &release{goal}
}

func (m *release) Name() string {
	return "lock:release"
}

func (m *release) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error {
	// Return early if locking is disabled.
	if !b.Config.Bundle.Lock.IsEnabled() {
		log.Infof(ctx, "Skipping; locking is disabled")
		return nil, nil
		return nil
	}

	// Return early if the locker is not set.
	// It is likely an error occurred prior to initialization of the locker instance.
	if b.Locker == nil {
		log.Warnf(ctx, "Unable to release lock if locker is not configured")
		return nil, nil
		return nil
	}

	log.Infof(ctx, "Releasing deployment lock")
	err := b.Locker.Unlock(ctx)
	if err != nil {
		log.Errorf(ctx, "Failed to release deployment lock: %v", err)
		return nil, err
	switch m.goal {
	case GoalDeploy:
		return b.Locker.Unlock(ctx)
	case GoalDestroy:
		return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)
	default:
		return fmt.Errorf("unknown goal for lock release: %s", m.goal)
	}

	return nil, nil
}
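// Illustrative sketch (not from the diff): the goal passed to Release decides
// how a missing lock file is treated on unlock, per the switch above:
//
//	lock.Release(lock.GoalDeploy)  // plain unlock
//	lock.Release(lock.GoalDestroy) // additionally tolerates a missing lock
//	                               // file (locker.AllowLockFileNotExist)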
@@ -15,26 +15,26 @@ func (w *apply) Name() string {
	return "terraform.Apply"
}

func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (w *apply) Apply(ctx context.Context, b *bundle.Bundle) error {
	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
		return fmt.Errorf("terraform not initialized")
	}

	cmdio.LogString(ctx, "Starting resource deployment")

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return nil, fmt.Errorf("terraform init: %w", err)
		return fmt.Errorf("terraform init: %w", err)
	}

	err = tf.Apply(ctx)
	if err != nil {
		return nil, fmt.Errorf("terraform apply: %w", err)
		return fmt.Errorf("terraform apply: %w", err)
	}

	cmdio.LogString(ctx, "Resource deployment completed!")
	return nil, nil
	return nil
}

// Apply returns a [bundle.Mutator] that runs the equivalent of `terraform apply`

@@ -62,28 +62,28 @@ func (w *destroy) Name() string {
	return "terraform.Destroy"
}

func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) error {
	// return early if plan is empty
	if b.Plan.IsEmpty {
		cmdio.LogString(ctx, "No resources to destroy in plan. Skipping destroy!")
		return nil, nil
		return nil
	}

	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
		return fmt.Errorf("terraform not initialized")
	}

	// read plan file
	plan, err := tf.ShowPlanFile(ctx, b.Plan.Path)
	if err != nil {
		return nil, err
		return err
	}

	// print the resources that will be destroyed
	err = logDestroyPlan(ctx, plan.ResourceChanges)
	if err != nil {
		return nil, err
		return err
	}

	// Ask for confirmation, if needed
@@ -91,17 +91,17 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
		red := color.New(color.FgRed).SprintFunc()
		b.Plan.ConfirmApply, err = cmdio.Ask(ctx, fmt.Sprintf("\nThis will permanently %s resources! Proceed? [y/n]: ", red("destroy")))
		if err != nil {
			return nil, err
			return err
		}
	}

	// return if confirmation was not provided
	if !b.Plan.ConfirmApply {
		return nil, nil
		return nil
	}

	if b.Plan.Path == "" {
		return nil, fmt.Errorf("no plan found")
		return fmt.Errorf("no plan found")
	}

	cmdio.LogString(ctx, "Starting to destroy resources")
@@ -109,11 +109,11 @@ func (w *destroy) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator
	// Apply terraform according to the computed destroy plan
	err = tf.Apply(ctx, tfexec.DirOrPlan(b.Plan.Path))
	if err != nil {
		return nil, fmt.Errorf("terraform destroy: %w", err)
		return fmt.Errorf("terraform destroy: %w", err)
	}

	cmdio.LogString(ctx, "Successfully destroyed resources!")
	return nil, nil
	return nil
}

// Destroy returns a [bundle.Mutator] that runs the conceptual equivalent of

@@ -6,6 +6,7 @@ import (
	"os"
	"os/exec"
	"path/filepath"
	"runtime"
	"strings"

	"github.com/databricks/cli/bundle"
@@ -69,7 +70,55 @@ func (m *initialize) findExecPath(ctx context.Context, b *bundle.Bundle, tf *con
	return tf.ExecPath, nil
}

func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
// This function sets temp dir location for terraform to use. If user does not
// specify anything here, we fall back to a `tmp` directory in the bundle's cache
// directory
//
// This is necessary to avoid trying to create temporary files in directories
// the CLI and its dependencies do not have access to.
//
// see: os.TempDir for more context
func setTempDirEnvVars(env map[string]string, b *bundle.Bundle) error {
	switch runtime.GOOS {
	case "windows":
		if v, ok := os.LookupEnv("TMP"); ok {
			env["TMP"] = v
		} else if v, ok := os.LookupEnv("TEMP"); ok {
			env["TEMP"] = v
		} else if v, ok := os.LookupEnv("USERPROFILE"); ok {
			env["USERPROFILE"] = v
		} else {
			tmpDir, err := b.CacheDir("tmp")
			if err != nil {
				return err
			}
			env["TMP"] = tmpDir
		}
	default:
		// If TMPDIR is not set, we let the process fall back to its default value.
		if v, ok := os.LookupEnv("TMPDIR"); ok {
			env["TMPDIR"] = v
		}
	}
	return nil
}

// This function passes through all proxy related environment variables.
func setProxyEnvVars(env map[string]string, b *bundle.Bundle) error {
	for _, v := range []string{"http_proxy", "https_proxy", "no_proxy"} {
		// The case (upper or lower) is notoriously inconsistent for tools on Unix systems.
		// We therefore try to read both the upper and lower case versions of the variable.
		for _, v := range []string{strings.ToUpper(v), strings.ToLower(v)} {
			if val, ok := os.LookupEnv(v); ok {
				// Only set uppercase version of the variable.
				env[strings.ToUpper(v)] = val
			}
		}
	}
	return nil
}

func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) error {
	tfConfig := b.Config.Bundle.Terraform
	if tfConfig == nil {
		tfConfig = &config.Terraform{}
@@ -78,22 +127,22 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Muta

	execPath, err := m.findExecPath(ctx, b, tfConfig)
	if err != nil {
		return nil, err
		return err
	}

	workingDir, err := Dir(b)
	if err != nil {
		return nil, err
		return err
	}

	tf, err := tfexec.NewTerraform(workingDir, execPath)
	if err != nil {
		return nil, err
		return err
	}

	env, err := b.AuthEnv()
	if err != nil {
		return nil, err
		return err
	}

	// Include $HOME in set of environment variables to pass along.
@@ -102,15 +151,27 @@ func (m *initialize) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Muta
		env["HOME"] = home
	}

	// Set the temporary directory environment variables
	err = setTempDirEnvVars(env, b)
	if err != nil {
		return err
	}

	// Set the proxy related environment variables
	err = setProxyEnvVars(env, b)
	if err != nil {
		return err
	}

	// Configure environment variables for auth for Terraform to use.
	log.Debugf(ctx, "Environment variables for Terraform: %s", strings.Join(maps.Keys(env), ", "))
	err = tf.SetEnv(env)
	if err != nil {
		return nil, err
		return err
	}

	b.Terraform = tf
	return nil, nil
	return nil
}

func Initialize() bundle.Mutator {
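// Illustrative sketch (not from the diff): condensed flow of how the
// environment handed to Terraform is now assembled before tf.SetEnv, per the
// hunks above (error handling elided):
//
//	env, _ := b.AuthEnv()          // auth-related variables
//	// $HOME is included when set
//	_ = setTempDirEnvVars(env, b)  // TMPDIR on Unix; TMP/TEMP/USERPROFILE on Windows
//	_ = setProxyEnvVars(env, b)    // HTTP_PROXY, HTTPS_PROXY, NO_PROXY (upper-cased)
//	_ = tf.SetEnv(env)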
@@ -2,14 +2,25 @@ package terraform

import (
	"context"
	"os"
	"os/exec"
	"runtime"
	"strings"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"golang.org/x/exp/maps"
)

func unsetEnv(t *testing.T, name string) {
	t.Setenv(name, "")
	err := os.Unsetenv(name)
	require.NoError(t, err)
}

func TestInitEnvironmentVariables(t *testing.T) {
	_, err := exec.LookPath("terraform")
	if err != nil {
@@ -34,6 +45,230 @@ func TestInitEnvironmentVariables(t *testing.T) {
	t.Setenv("DATABRICKS_TOKEN", "foobar")
	bundle.WorkspaceClient()

	_, err = Initialize().Apply(context.Background(), bundle)
	err = Initialize().Apply(context.Background(), bundle)
	require.NoError(t, err)
}

func TestSetTempDirEnvVarsForUnixWithTmpDirSet(t *testing.T) {
	if runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Set TMPDIR environment variable
	t.Setenv("TMPDIR", "/foo/bar")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// Assert that we pass through TMPDIR.
	assert.Equal(t, map[string]string{
		"TMPDIR": "/foo/bar",
	}, env)
}

func TestSetTempDirEnvVarsForUnixWithTmpDirNotSet(t *testing.T) {
	if runtime.GOOS != "darwin" && runtime.GOOS != "linux" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Unset TMPDIR environment variable confirm it's not set
	unsetEnv(t, "TMPDIR")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// Assert that we don't pass through TMPDIR.
	assert.Equal(t, map[string]string{}, env)
}

func TestSetTempDirEnvVarsForWindowWithAllTmpDirEnvVarsSet(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Set environment variables
	t.Setenv("TMP", "c:\\foo\\a")
	t.Setenv("TEMP", "c:\\foo\\b")
	t.Setenv("USERPROFILE", "c:\\foo\\c")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// assert that we pass through the highest priority env var value
	assert.Equal(t, map[string]string{
		"TMP": "c:\\foo\\a",
	}, env)
}

func TestSetTempDirEnvVarsForWindowWithUserProfileAndTempSet(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Set environment variables
	unsetEnv(t, "TMP")
	t.Setenv("TEMP", "c:\\foo\\b")
	t.Setenv("USERPROFILE", "c:\\foo\\c")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// assert that we pass through the highest priority env var value
	assert.Equal(t, map[string]string{
		"TEMP": "c:\\foo\\b",
	}, env)
}

func TestSetTempDirEnvVarsForWindowWithUserProfileSet(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Set environment variables
	unsetEnv(t, "TMP")
	unsetEnv(t, "TEMP")
	t.Setenv("USERPROFILE", "c:\\foo\\c")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// assert that we pass through the user profile
	assert.Equal(t, map[string]string{
		"USERPROFILE": "c:\\foo\\c",
	}, env)
}

func TestSetTempDirEnvVarsForWindowsWithoutAnyTempDirEnvVarsSet(t *testing.T) {
	if runtime.GOOS != "windows" {
		t.SkipNow()
	}

	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// unset all env vars
	unsetEnv(t, "TMP")
	unsetEnv(t, "TEMP")
	unsetEnv(t, "USERPROFILE")

	// compute env
	env := make(map[string]string, 0)
	err := setTempDirEnvVars(env, b)
	require.NoError(t, err)

	// assert TMP is set to b.CacheDir("tmp")
	tmpDir, err := b.CacheDir("tmp")
	require.NoError(t, err)
	assert.Equal(t, map[string]string{
		"TMP": tmpDir,
	}, env)
}

func TestSetProxyEnvVars(t *testing.T) {
	b := &bundle.Bundle{
		Config: config.Root{
			Path: t.TempDir(),
			Bundle: config.Bundle{
				Environment: "whatever",
			},
		},
	}

	// Temporarily clear environment variables.
	clearEnv := func() {
		for _, v := range []string{"http_proxy", "https_proxy", "no_proxy"} {
			for _, v := range []string{strings.ToUpper(v), strings.ToLower(v)} {
				t.Setenv(v, "foo")
				os.Unsetenv(v)
			}
		}
	}

	// No proxy env vars set.
	clearEnv()
	env := make(map[string]string, 0)
	err := setProxyEnvVars(env, b)
	require.NoError(t, err)
	assert.Len(t, env, 0)

	// Lower case set.
	clearEnv()
	t.Setenv("http_proxy", "foo")
	t.Setenv("https_proxy", "foo")
	t.Setenv("no_proxy", "foo")
	env = make(map[string]string, 0)
	err = setProxyEnvVars(env, b)
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env))

	// Upper case set.
	clearEnv()
	t.Setenv("HTTP_PROXY", "foo")
	t.Setenv("HTTPS_PROXY", "foo")
	t.Setenv("NO_PROXY", "foo")
	env = make(map[string]string, 0)
	err = setProxyEnvVars(env, b)
	require.NoError(t, err)
	assert.ElementsMatch(t, []string{"HTTP_PROXY", "HTTPS_PROXY", "NO_PROXY"}, maps.Keys(env))
}

@@ -15,34 +15,34 @@ func (l *load) Name() string {
	return "terraform.Load"
}

func (l *load) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (l *load) Apply(ctx context.Context, b *bundle.Bundle) error {
	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
		return fmt.Errorf("terraform not initialized")
	}

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return nil, fmt.Errorf("terraform init: %w", err)
		return fmt.Errorf("terraform init: %w", err)
	}

	state, err := b.Terraform.Show(ctx)
	if err != nil {
		return nil, err
		return err
	}

	err = ValidateState(state)
	if err != nil {
		return nil, err
		return err
	}

	// Merge state into configuration.
	err = TerraformToBundle(state, &b.Config)
	if err != nil {
		return nil, err
		return err
	}

	return nil, nil
	return nil
}

func ValidateState(state *tfjson.State) error {

@@ -32,10 +32,10 @@ func TestLoadWithNoState(t *testing.T) {
	t.Setenv("DATABRICKS_TOKEN", "foobar")
	b.WorkspaceClient()

	err = bundle.Apply(context.Background(), b, []bundle.Mutator{
	err = bundle.Apply(context.Background(), b, bundle.Seq(
		Initialize(),
		Load(),
	})
	))

	require.ErrorContains(t, err, "Did you forget to run 'databricks bundle deploy'")
}

@@ -26,30 +26,30 @@ func (p *plan) Name() string {
	return "terraform.Plan"
}

func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) error {
	tf := b.Terraform
	if tf == nil {
		return nil, fmt.Errorf("terraform not initialized")
		return fmt.Errorf("terraform not initialized")
	}

	cmdio.LogString(ctx, "Starting plan computation")

	err := tf.Init(ctx, tfexec.Upgrade(true))
	if err != nil {
		return nil, fmt.Errorf("terraform init: %w", err)
		return fmt.Errorf("terraform init: %w", err)
	}

	// Persist computed plan
	tfDir, err := Dir(b)
	if err != nil {
		return nil, err
		return err
	}
	planPath := filepath.Join(tfDir, "plan")
	destroy := p.goal == PlanDestroy

	notEmpty, err := tf.Plan(ctx, tfexec.Destroy(destroy), tfexec.Out(planPath))
	if err != nil {
		return nil, err
		return err
	}

	// Set plan in main bundle struct for downstream mutators
@@ -60,7 +60,7 @@ func (p *plan) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, e
	}

	cmdio.LogString(ctx, fmt.Sprintf("Planning complete and persisted at %s\n", planPath))
	return nil, nil
	return nil
}

// Plan returns a [bundle.Mutator] that runs the equivalent of `terraform plan -out ./plan`

@@ -2,14 +2,15 @@ package terraform

import (
	"context"
	"errors"
	"io"
	"io/fs"
	"os"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/databricks-sdk-go/apierr"
)

type statePull struct{}

@@ -18,15 +19,15 @@ func (l *statePull) Name() string {
	return "terraform:state-pull"
}

func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) error {
	f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
	if err != nil {
		return nil, err
		return err
	}

	dir, err := Dir(b)
	if err != nil {
		return nil, err
		return err
	}

	// Download state file from filer to local cache directory.
@@ -34,23 +35,23 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutat
	remote, err := f.Read(ctx, TerraformStateFileName)
	if err != nil {
		// On first deploy this state file doesn't yet exist.
		if apierr.IsMissing(err) {
		if errors.Is(err, fs.ErrNotExist) {
			log.Infof(ctx, "Remote state file does not exist")
			return nil, nil
			return nil
		}
		return nil, err
		return err
	}

	// Expect the state file to live under dir.
	local, err := os.OpenFile(filepath.Join(dir, TerraformStateFileName), os.O_CREATE|os.O_RDWR, 0600)
	if err != nil {
		return nil, err
		return err
	}
	defer local.Close()

	if !IsLocalStateStale(local, remote) {
		log.Infof(ctx, "Local state is the same or newer, ignoring remote state")
		return nil, nil
		return nil
	}

	// Truncating the file before writing
@@ -61,10 +62,10 @@ func (l *statePull) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutat
	log.Infof(ctx, "Writing remote state file to local cache directory")
	_, err = io.Copy(local, remote)
	if err != nil {
		return nil, err
		return err
	}

	return nil, nil
	return nil
}

func StatePull() bundle.Mutator {
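// Illustrative sketch (not from the diff): the filer layer reports a missing
// remote file as fs.ErrNotExist, so the standard errors.Is check replaces the
// SDK-specific apierr.IsMissing:
//
//	remote, err := f.Read(ctx, TerraformStateFileName)
//	if errors.Is(err, fs.ErrNotExist) {
//		// first deploy: no remote state yet
//	}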
@@ -16,31 +16,31 @@ func (l *statePush) Name() string {
	return "terraform:state-push"
}

func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (l *statePush) Apply(ctx context.Context, b *bundle.Bundle) error {
	f, err := filer.NewWorkspaceFilesClient(b.WorkspaceClient(), b.Config.Workspace.StatePath)
	if err != nil {
		return nil, err
		return err
	}

	dir, err := Dir(b)
	if err != nil {
		return nil, err
		return err
	}

	// Expect the state file to live under dir.
	local, err := os.Open(filepath.Join(dir, TerraformStateFileName))
	if err != nil {
		return nil, err
		return err
	}

	// Upload state file from local cache directory to filer.
	log.Infof(ctx, "Writing local state file to remote state directory")
	err = f.Write(ctx, TerraformStateFileName, local, filer.CreateParentDirectories, filer.OverwriteIfExists)
	if err != nil {
		return nil, err
		return err
	}

	return nil, nil
	return nil
}

func StatePush() bundle.Mutator {

@@ -15,16 +15,16 @@ func (w *write) Name() string {
	return "terraform.Write"
}

func (w *write) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (w *write) Apply(ctx context.Context, b *bundle.Bundle) error {
	dir, err := Dir(b)
	if err != nil {
		return nil, err
		return err
	}

	root := BundleToTerraform(&b.Config)
	f, err := os.Create(filepath.Join(dir, "bundle.tf.json"))
	if err != nil {
		return nil, err
		return err
	}

	defer f.Close()
@@ -33,10 +33,10 @@ func (w *write) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator,
	enc.SetIndent("", "  ")
	err = enc.Encode(root)
	if err != nil {
		return nil, err
		return err
	}

	return nil, nil
	return nil
}

// Write returns a [bundle.Mutator] that converts resources in a bundle configuration

@@ -2,10 +2,12 @@ package deployer

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/fs"
	"os"
	"path/filepath"
	"strings"

	"github.com/databricks/cli/libs/locker"
	"github.com/databricks/cli/libs/log"
@@ -97,22 +99,25 @@ func (b *Deployer) tfStateLocalPath() string {
	return filepath.Join(b.DefaultTerraformRoot(), "terraform.tfstate")
}

func (b *Deployer) LoadTerraformState(ctx context.Context) error {
	bytes, err := b.locker.GetRawJsonFileContent(ctx, b.tfStateRemotePath())
	if err != nil {
func (d *Deployer) LoadTerraformState(ctx context.Context) error {
	r, err := d.locker.Read(ctx, d.tfStateRemotePath())
	if errors.Is(err, fs.ErrNotExist) {
		// If remote tf state is absent, use local tf state
		if strings.Contains(err.Error(), "File not found.") {
			return nil
		} else {
			return err
		}
		return nil
	}
	err = os.MkdirAll(b.DefaultTerraformRoot(), os.ModeDir)
	if err != nil {
		return err
	}
	err = os.WriteFile(b.tfStateLocalPath(), bytes, os.ModePerm)
	return err
	defer r.Close()
	err = os.MkdirAll(d.DefaultTerraformRoot(), os.ModeDir)
	if err != nil {
		return err
	}
	b, err := io.ReadAll(r)
	if err != nil {
		return err
	}
	return os.WriteFile(d.tfStateLocalPath(), b, os.ModePerm)
}

func (b *Deployer) SaveTerraformState(ctx context.Context) error {
@@ -120,7 +125,7 @@ func (b *Deployer) SaveTerraformState(ctx context.Context) error {
	if err != nil {
		return err
	}
	return b.locker.PutFile(ctx, b.tfStateRemotePath(), bytes)
	return b.locker.Write(ctx, b.tfStateRemotePath(), bytes)
}

func (d *Deployer) Lock(ctx context.Context, isForced bool) error {
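// Illustrative sketch (not from the diff): the locker API as used above.
// Read returns a reader (with fs.ErrNotExist for an absent file) and Write
// replaces PutFile; exact signatures are assumptions based on these hunks:
//
//	r, err := d.locker.Read(ctx, d.tfStateRemotePath())
//	// ... copy or decode the contents ...
//	err = d.locker.Write(ctx, d.tfStateRemotePath(), content)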
@ -12,16 +12,17 @@ type Config struct {
|
|||
AzureTenantId string `json:"azure_tenant_id,omitempty"`
|
||||
AzureUseMsi bool `json:"azure_use_msi,omitempty"`
|
||||
AzureWorkspaceResourceId string `json:"azure_workspace_resource_id,omitempty"`
|
||||
BricksCliPath string `json:"bricks_cli_path,omitempty"`
|
||||
ClientId string `json:"client_id,omitempty"`
|
||||
ClientSecret string `json:"client_secret,omitempty"`
|
||||
ConfigFile string `json:"config_file,omitempty"`
|
||||
DatabricksCliPath string `json:"databricks_cli_path,omitempty"`
|
||||
DebugHeaders bool `json:"debug_headers,omitempty"`
|
||||
DebugTruncateBytes int `json:"debug_truncate_bytes,omitempty"`
|
||||
GoogleCredentials string `json:"google_credentials,omitempty"`
|
||||
GoogleServiceAccount string `json:"google_service_account,omitempty"`
|
||||
Host string `json:"host,omitempty"`
|
||||
HttpTimeoutSeconds int `json:"http_timeout_seconds,omitempty"`
|
||||
MetadataServiceUrl string `json:"metadata_service_url,omitempty"`
|
||||
Password string `json:"password,omitempty"`
|
||||
Profile string `json:"profile,omitempty"`
|
||||
RateLimit int `json:"rate_limit,omitempty"`
|
||||
|
|
|
@ -120,12 +120,17 @@ type DataSourceClusterClusterInfoInitScriptsS3 struct {
|
|||
Region string `json:"region,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceClusterClusterInfoInitScriptsWorkspace struct {
|
||||
Destination string `json:"destination,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceClusterClusterInfoInitScripts struct {
|
||||
Abfss *DataSourceClusterClusterInfoInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceClusterClusterInfoInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceClusterClusterInfoInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceClusterClusterInfoInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceClusterClusterInfoInitScriptsS3 `json:"s3,omitempty"`
|
||||
Abfss *DataSourceClusterClusterInfoInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceClusterClusterInfoInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceClusterClusterInfoInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceClusterClusterInfoInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceClusterClusterInfoInitScriptsS3 `json:"s3,omitempty"`
|
||||
Workspace *DataSourceClusterClusterInfoInitScriptsWorkspace `json:"workspace,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceClusterClusterInfoTerminationReason struct {
|
||||
|
|
|
@ -3,8 +3,12 @@
|
|||
package schema
|
||||
|
||||
type DataSourceClusterPolicy struct {
|
||||
Definition string `json:"definition,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
|
||||
Name string `json:"name"`
|
||||
Definition string `json:"definition,omitempty"`
|
||||
Description string `json:"description,omitempty"`
|
||||
Id string `json:"id,omitempty"`
|
||||
IsDefault bool `json:"is_default,omitempty"`
|
||||
MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
|
||||
Name string `json:"name,omitempty"`
|
||||
PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
|
||||
PolicyFamilyId string `json:"policy_family_id,omitempty"`
|
||||
}
|
||||
|
|
|
@ -127,12 +127,17 @@ type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 struct {
|
|||
Region string `json:"region,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace struct {
|
||||
Destination string `json:"destination,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScripts struct {
|
||||
Abfss *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
|
||||
Abfss *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
|
||||
Workspace *DataSourceJobJobSettingsSettingsJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsJobClusterNewClusterWorkloadTypeClients struct {
|
||||
|
@ -303,12 +308,17 @@ type DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 struct {
|
|||
Region string `json:"region,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace struct {
|
||||
Destination string `json:"destination,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsNewClusterInitScripts struct {
|
||||
Abfss *DataSourceJobJobSettingsSettingsNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 `json:"s3,omitempty"`
|
||||
Abfss *DataSourceJobJobSettingsSettingsNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
|
||||
Dbfs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
|
||||
File *DataSourceJobJobSettingsSettingsNewClusterInitScriptsFile `json:"file,omitempty"`
|
||||
Gcs *DataSourceJobJobSettingsSettingsNewClusterInitScriptsGcs `json:"gcs,omitempty"`
|
||||
S3 *DataSourceJobJobSettingsSettingsNewClusterInitScriptsS3 `json:"s3,omitempty"`
|
||||
Workspace *DataSourceJobJobSettingsSettingsNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsNewClusterWorkloadTypeClients struct {
|
||||
|
@ -359,6 +369,11 @@ type DataSourceJobJobSettingsSettingsNotebookTask struct {
|
|||
Source string `json:"source,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsNotificationSettings struct {
|
||||
NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"`
|
||||
NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
|
||||
}
|
||||
|
||||
type DataSourceJobJobSettingsSettingsPipelineTask struct {
|
||||
PipelineId string `json:"pipeline_id"`
|
||||
}
|
||||
|
@@ -370,6 +385,14 @@ type DataSourceJobJobSettingsSettingsPythonWheelTask struct {
  Parameters []string `json:"parameters,omitempty"`
}

type DataSourceJobJobSettingsSettingsQueue struct {
}

type DataSourceJobJobSettingsSettingsRunAs struct {
  ServicePrincipalName string `json:"service_principal_name,omitempty"`
  UserName string `json:"user_name,omitempty"`
}

type DataSourceJobJobSettingsSettingsSchedule struct {
  PauseStatus string `json:"pause_status,omitempty"`
  QuartzCronExpression string `json:"quartz_cron_expression"`
@@ -385,6 +408,7 @@ type DataSourceJobJobSettingsSettingsSparkJarTask struct {
type DataSourceJobJobSettingsSettingsSparkPythonTask struct {
  Parameters []string `json:"parameters,omitempty"`
  PythonFile string `json:"python_file"`
  Source string `json:"source,omitempty"`
}

type DataSourceJobJobSettingsSettingsSparkSubmitTask struct {
@@ -533,12 +557,17 @@ type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type DataSourceJobJobSettingsSettingsTaskNewClusterInitScripts struct {
  Abfss *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *DataSourceJobJobSettingsSettingsTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type DataSourceJobJobSettingsSettingsTaskNewClusterWorkloadTypeClients struct {
@@ -609,6 +638,7 @@ type DataSourceJobJobSettingsSettingsTaskSparkJarTask struct {
type DataSourceJobJobSettingsSettingsTaskSparkPythonTask struct {
  Parameters []string `json:"parameters,omitempty"`
  PythonFile string `json:"python_file"`
  Source string `json:"source,omitempty"`
}

type DataSourceJobJobSettingsSettingsTaskSparkSubmitTask struct {
@@ -623,6 +653,10 @@ type DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard struct {
  DashboardId string `json:"dashboard_id"`
}

type DataSourceJobJobSettingsSettingsTaskSqlTaskFile struct {
  Path string `json:"path"`
}

type DataSourceJobJobSettingsSettingsTaskSqlTaskQuery struct {
  QueryId string `json:"query_id"`
}
@@ -632,6 +666,7 @@ type DataSourceJobJobSettingsSettingsTaskSqlTask struct {
  WarehouseId string `json:"warehouse_id,omitempty"`
  Alert *DataSourceJobJobSettingsSettingsTaskSqlTaskAlert `json:"alert,omitempty"`
  Dashboard *DataSourceJobJobSettingsSettingsTaskSqlTaskDashboard `json:"dashboard,omitempty"`
  File *DataSourceJobJobSettingsSettingsTaskSqlTaskFile `json:"file,omitempty"`
  Query *DataSourceJobJobSettingsSettingsTaskSqlTaskQuery `json:"query,omitempty"`
}

@@ -642,6 +677,7 @@ type DataSourceJobJobSettingsSettingsTask struct {
  MaxRetries int `json:"max_retries,omitempty"`
  MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
  RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
  RunIf string `json:"run_if,omitempty"`
  TaskKey string `json:"task_key,omitempty"`
  TimeoutSeconds int `json:"timeout_seconds,omitempty"`
  DbtTask *DataSourceJobJobSettingsSettingsTaskDbtTask `json:"dbt_task,omitempty"`
@@ -658,6 +694,17 @@ type DataSourceJobJobSettingsSettingsTask struct {
  SqlTask *DataSourceJobJobSettingsSettingsTaskSqlTask `json:"sql_task,omitempty"`
}

type DataSourceJobJobSettingsSettingsTriggerFileArrival struct {
  MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"`
  Url string `json:"url"`
  WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
}

type DataSourceJobJobSettingsSettingsTrigger struct {
  PauseStatus string `json:"pause_status,omitempty"`
  FileArrival *DataSourceJobJobSettingsSettingsTriggerFileArrival `json:"file_arrival,omitempty"`
}

type DataSourceJobJobSettingsSettingsWebhookNotificationsOnFailure struct {
  Id string `json:"id"`
}
@@ -694,13 +741,17 @@ type DataSourceJobJobSettingsSettings struct {
  Library []DataSourceJobJobSettingsSettingsLibrary `json:"library,omitempty"`
  NewCluster *DataSourceJobJobSettingsSettingsNewCluster `json:"new_cluster,omitempty"`
  NotebookTask *DataSourceJobJobSettingsSettingsNotebookTask `json:"notebook_task,omitempty"`
  NotificationSettings *DataSourceJobJobSettingsSettingsNotificationSettings `json:"notification_settings,omitempty"`
  PipelineTask *DataSourceJobJobSettingsSettingsPipelineTask `json:"pipeline_task,omitempty"`
  PythonWheelTask *DataSourceJobJobSettingsSettingsPythonWheelTask `json:"python_wheel_task,omitempty"`
  Queue *DataSourceJobJobSettingsSettingsQueue `json:"queue,omitempty"`
  RunAs *DataSourceJobJobSettingsSettingsRunAs `json:"run_as,omitempty"`
  Schedule *DataSourceJobJobSettingsSettingsSchedule `json:"schedule,omitempty"`
  SparkJarTask *DataSourceJobJobSettingsSettingsSparkJarTask `json:"spark_jar_task,omitempty"`
  SparkPythonTask *DataSourceJobJobSettingsSettingsSparkPythonTask `json:"spark_python_task,omitempty"`
  SparkSubmitTask *DataSourceJobJobSettingsSettingsSparkSubmitTask `json:"spark_submit_task,omitempty"`
  Task []DataSourceJobJobSettingsSettingsTask `json:"task,omitempty"`
  Trigger *DataSourceJobJobSettingsSettingsTrigger `json:"trigger,omitempty"`
  WebhookNotifications *DataSourceJobJobSettingsSettingsWebhookNotifications `json:"webhook_notifications,omitempty"`
}

@@ -708,6 +759,7 @@ type DataSourceJobJobSettings struct {
  CreatedTime int `json:"created_time,omitempty"`
  CreatorUserName string `json:"creator_user_name,omitempty"`
  JobId int `json:"job_id,omitempty"`
  RunAsUserName string `json:"run_as_user_name,omitempty"`
  Settings *DataSourceJobJobSettingsSettings `json:"settings,omitempty"`
}

@@ -0,0 +1,9 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type DataSourcePipelines struct {
  Id string `json:"id,omitempty"`
  Ids []string `json:"ids,omitempty"`
  PipelineName string `json:"pipeline_name,omitempty"`
}

@@ -23,6 +23,7 @@ type DataSources struct {
  NodeType map[string]*DataSourceNodeType `json:"databricks_node_type,omitempty"`
  Notebook map[string]*DataSourceNotebook `json:"databricks_notebook,omitempty"`
  NotebookPaths map[string]*DataSourceNotebookPaths `json:"databricks_notebook_paths,omitempty"`
  Pipelines map[string]*DataSourcePipelines `json:"databricks_pipelines,omitempty"`
  Schemas map[string]*DataSourceSchemas `json:"databricks_schemas,omitempty"`
  ServicePrincipal map[string]*DataSourceServicePrincipal `json:"databricks_service_principal,omitempty"`
  ServicePrincipals map[string]*DataSourceServicePrincipals `json:"databricks_service_principals,omitempty"`
@@ -59,6 +60,7 @@ func NewDataSources() *DataSources {
    NodeType: make(map[string]*DataSourceNodeType),
    Notebook: make(map[string]*DataSourceNotebook),
    NotebookPaths: make(map[string]*DataSourceNotebookPaths),
    Pipelines: make(map[string]*DataSourcePipelines),
    Schemas: make(map[string]*DataSourceSchemas),
    ServicePrincipal: make(map[string]*DataSourceServicePrincipal),
    ServicePrincipals: make(map[string]*DataSourceServicePrincipals),

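Note (editor's sketch, not part of the commit): these generated schema structs exist to be marshaled into Terraform's JSON configuration syntax. A minimal illustration of that round trip, with a hypothetical data source name and assuming the JSON tags shown above:

  package main

  import (
    "encoding/json"
    "fmt"
  )

  // Mirrors the generated schema.DataSourcePipelines struct above.
  type DataSourcePipelines struct {
    Id           string   `json:"id,omitempty"`
    Ids          []string `json:"ids,omitempty"`
    PipelineName string   `json:"pipeline_name,omitempty"`
  }

  func main() {
    // A "data" block keyed by resource type and block name, as Terraform's
    // JSON syntax expects; "all" is an illustrative block name.
    cfg := map[string]any{
      "data": map[string]any{
        "databricks_pipelines": map[string]any{
          "all": DataSourcePipelines{PipelineName: "my-pipeline"},
        },
      },
    }
    out, _ := json.MarshalIndent(cfg, "", "  ")
    fmt.Println(string(out))
  }
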
@@ -3,14 +3,15 @@
package schema

type ResourceCatalog struct {
  Comment string `json:"comment,omitempty"`
  ForceDestroy bool `json:"force_destroy,omitempty"`
  Id string `json:"id,omitempty"`
  MetastoreId string `json:"metastore_id,omitempty"`
  Name string `json:"name"`
  Owner string `json:"owner,omitempty"`
  Properties map[string]string `json:"properties,omitempty"`
  ProviderName string `json:"provider_name,omitempty"`
  ShareName string `json:"share_name,omitempty"`
  StorageRoot string `json:"storage_root,omitempty"`
  Comment string `json:"comment,omitempty"`
  ForceDestroy bool `json:"force_destroy,omitempty"`
  Id string `json:"id,omitempty"`
  IsolationMode string `json:"isolation_mode,omitempty"`
  MetastoreId string `json:"metastore_id,omitempty"`
  Name string `json:"name"`
  Owner string `json:"owner,omitempty"`
  Properties map[string]string `json:"properties,omitempty"`
  ProviderName string `json:"provider_name,omitempty"`
  ShareName string `json:"share_name,omitempty"`
  StorageRoot string `json:"storage_root,omitempty"`
}

@@ -98,12 +98,17 @@ type ResourceClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type ResourceClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type ResourceClusterInitScripts struct {
  Abfss *ResourceClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *ResourceClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *ResourceClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type ResourceClusterLibraryCran struct {

@@ -3,9 +3,12 @@
package schema

type ResourceClusterPolicy struct {
  Definition string `json:"definition"`
  Id string `json:"id,omitempty"`
  MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
  Name string `json:"name"`
  PolicyId string `json:"policy_id,omitempty"`
  Definition string `json:"definition,omitempty"`
  Description string `json:"description,omitempty"`
  Id string `json:"id,omitempty"`
  MaxClustersPerUser int `json:"max_clusters_per_user,omitempty"`
  Name string `json:"name"`
  PolicyFamilyDefinitionOverrides string `json:"policy_family_definition_overrides,omitempty"`
  PolicyFamilyId string `json:"policy_family_id,omitempty"`
  PolicyId string `json:"policy_id,omitempty"`
}

@@ -5,10 +5,12 @@ package schema
type ResourceExternalLocation struct {
  Comment string `json:"comment,omitempty"`
  CredentialName string `json:"credential_name"`
  ForceDestroy bool `json:"force_destroy,omitempty"`
  Id string `json:"id,omitempty"`
  MetastoreId string `json:"metastore_id,omitempty"`
  Name string `json:"name"`
  Owner string `json:"owner,omitempty"`
  ReadOnly bool `json:"read_only,omitempty"`
  SkipValidation bool `json:"skip_validation,omitempty"`
  Url string `json:"url"`
}

@@ -127,12 +127,17 @@ type ResourceJobJobClusterNewClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type ResourceJobJobClusterNewClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type ResourceJobJobClusterNewClusterInitScripts struct {
  Abfss *ResourceJobJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *ResourceJobJobClusterNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobJobClusterNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobJobClusterNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobJobClusterNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobJobClusterNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *ResourceJobJobClusterNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type ResourceJobJobClusterNewClusterWorkloadTypeClients struct {
@@ -303,12 +308,17 @@ type ResourceJobNewClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type ResourceJobNewClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type ResourceJobNewClusterInitScripts struct {
  Abfss *ResourceJobNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *ResourceJobNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *ResourceJobNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type ResourceJobNewClusterWorkloadTypeClients struct {
@@ -359,6 +369,11 @@ type ResourceJobNotebookTask struct {
  Source string `json:"source,omitempty"`
}

type ResourceJobNotificationSettings struct {
  NoAlertForCanceledRuns bool `json:"no_alert_for_canceled_runs,omitempty"`
  NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"`
}

type ResourceJobPipelineTask struct {
  PipelineId string `json:"pipeline_id"`
}
@@ -370,6 +385,14 @@ type ResourceJobPythonWheelTask struct {
  Parameters []string `json:"parameters,omitempty"`
}

type ResourceJobQueue struct {
}

type ResourceJobRunAs struct {
  ServicePrincipalName string `json:"service_principal_name,omitempty"`
  UserName string `json:"user_name,omitempty"`
}

type ResourceJobSchedule struct {
  PauseStatus string `json:"pause_status,omitempty"`
  QuartzCronExpression string `json:"quartz_cron_expression"`
@@ -385,6 +408,7 @@ type ResourceJobSparkJarTask struct {
type ResourceJobSparkPythonTask struct {
  Parameters []string `json:"parameters,omitempty"`
  PythonFile string `json:"python_file"`
  Source string `json:"source,omitempty"`
}

type ResourceJobSparkSubmitTask struct {
@@ -533,12 +557,17 @@ type ResourceJobTaskNewClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type ResourceJobTaskNewClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type ResourceJobTaskNewClusterInitScripts struct {
  Abfss *ResourceJobTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobTaskNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *ResourceJobTaskNewClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourceJobTaskNewClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourceJobTaskNewClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourceJobTaskNewClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourceJobTaskNewClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *ResourceJobTaskNewClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type ResourceJobTaskNewClusterWorkloadTypeClients struct {
@@ -609,6 +638,7 @@ type ResourceJobTaskSparkJarTask struct {
type ResourceJobTaskSparkPythonTask struct {
  Parameters []string `json:"parameters,omitempty"`
  PythonFile string `json:"python_file"`
  Source string `json:"source,omitempty"`
}

type ResourceJobTaskSparkSubmitTask struct {
@@ -623,6 +653,10 @@ type ResourceJobTaskSqlTaskDashboard struct {
  DashboardId string `json:"dashboard_id"`
}

type ResourceJobTaskSqlTaskFile struct {
  Path string `json:"path"`
}

type ResourceJobTaskSqlTaskQuery struct {
  QueryId string `json:"query_id"`
}
@@ -632,6 +666,7 @@ type ResourceJobTaskSqlTask struct {
  WarehouseId string `json:"warehouse_id,omitempty"`
  Alert *ResourceJobTaskSqlTaskAlert `json:"alert,omitempty"`
  Dashboard *ResourceJobTaskSqlTaskDashboard `json:"dashboard,omitempty"`
  File *ResourceJobTaskSqlTaskFile `json:"file,omitempty"`
  Query *ResourceJobTaskSqlTaskQuery `json:"query,omitempty"`
}

@@ -642,6 +677,7 @@ type ResourceJobTask struct {
  MaxRetries int `json:"max_retries,omitempty"`
  MinRetryIntervalMillis int `json:"min_retry_interval_millis,omitempty"`
  RetryOnTimeout bool `json:"retry_on_timeout,omitempty"`
  RunIf string `json:"run_if,omitempty"`
  TaskKey string `json:"task_key,omitempty"`
  TimeoutSeconds int `json:"timeout_seconds,omitempty"`
  DbtTask *ResourceJobTaskDbtTask `json:"dbt_task,omitempty"`
@@ -658,6 +694,17 @@ type ResourceJobTask struct {
  SqlTask *ResourceJobTaskSqlTask `json:"sql_task,omitempty"`
}

type ResourceJobTriggerFileArrival struct {
  MinTimeBetweenTriggerSeconds int `json:"min_time_between_trigger_seconds,omitempty"`
  Url string `json:"url"`
  WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"`
}

type ResourceJobTrigger struct {
  PauseStatus string `json:"pause_status,omitempty"`
  FileArrival *ResourceJobTriggerFileArrival `json:"file_arrival,omitempty"`
}

type ResourceJobWebhookNotificationsOnFailure struct {
  Id string `json:"id"`
}
@@ -697,12 +744,16 @@ type ResourceJob struct {
  Library []ResourceJobLibrary `json:"library,omitempty"`
  NewCluster *ResourceJobNewCluster `json:"new_cluster,omitempty"`
  NotebookTask *ResourceJobNotebookTask `json:"notebook_task,omitempty"`
  NotificationSettings *ResourceJobNotificationSettings `json:"notification_settings,omitempty"`
  PipelineTask *ResourceJobPipelineTask `json:"pipeline_task,omitempty"`
  PythonWheelTask *ResourceJobPythonWheelTask `json:"python_wheel_task,omitempty"`
  Queue *ResourceJobQueue `json:"queue,omitempty"`
  RunAs *ResourceJobRunAs `json:"run_as,omitempty"`
  Schedule *ResourceJobSchedule `json:"schedule,omitempty"`
  SparkJarTask *ResourceJobSparkJarTask `json:"spark_jar_task,omitempty"`
  SparkPythonTask *ResourceJobSparkPythonTask `json:"spark_python_task,omitempty"`
  SparkSubmitTask *ResourceJobSparkSubmitTask `json:"spark_submit_task,omitempty"`
  Task []ResourceJobTask `json:"task,omitempty"`
  Trigger *ResourceJobTrigger `json:"trigger,omitempty"`
  WebhookNotifications *ResourceJobWebhookNotifications `json:"webhook_notifications,omitempty"`
}

@@ -25,7 +25,8 @@ type ResourceModelServingConfig struct {
}

type ResourceModelServing struct {
  Id string `json:"id,omitempty"`
  Name string `json:"name"`
  Config *ResourceModelServingConfig `json:"config,omitempty"`
  Id string `json:"id,omitempty"`
  Name string `json:"name"`
  ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
  Config *ResourceModelServingConfig `json:"config,omitempty"`
}

@@ -26,9 +26,12 @@ type ResourcePermissions struct {
  RegisteredModelId string `json:"registered_model_id,omitempty"`
  RepoId string `json:"repo_id,omitempty"`
  RepoPath string `json:"repo_path,omitempty"`
  ServingEndpointId string `json:"serving_endpoint_id,omitempty"`
  SqlAlertId string `json:"sql_alert_id,omitempty"`
  SqlDashboardId string `json:"sql_dashboard_id,omitempty"`
  SqlEndpointId string `json:"sql_endpoint_id,omitempty"`
  SqlQueryId string `json:"sql_query_id,omitempty"`
  WorkspaceFileId string `json:"workspace_file_id,omitempty"`
  WorkspaceFilePath string `json:"workspace_file_path,omitempty"`
  AccessControl []ResourcePermissionsAccessControl `json:"access_control,omitempty"`
}

@@ -76,12 +76,17 @@ type ResourcePipelineClusterInitScriptsS3 struct {
  Region string `json:"region,omitempty"`
}

type ResourcePipelineClusterInitScriptsWorkspace struct {
  Destination string `json:"destination,omitempty"`
}

type ResourcePipelineClusterInitScripts struct {
  Abfss *ResourcePipelineClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourcePipelineClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourcePipelineClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourcePipelineClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourcePipelineClusterInitScriptsS3 `json:"s3,omitempty"`
  Abfss *ResourcePipelineClusterInitScriptsAbfss `json:"abfss,omitempty"`
  Dbfs *ResourcePipelineClusterInitScriptsDbfs `json:"dbfs,omitempty"`
  File *ResourcePipelineClusterInitScriptsFile `json:"file,omitempty"`
  Gcs *ResourcePipelineClusterInitScriptsGcs `json:"gcs,omitempty"`
  S3 *ResourcePipelineClusterInitScriptsS3 `json:"s3,omitempty"`
  Workspace *ResourcePipelineClusterInitScriptsWorkspace `json:"workspace,omitempty"`
}

type ResourcePipelineCluster struct {
@@ -133,21 +138,28 @@ type ResourcePipelineLibrary struct {
  Notebook *ResourcePipelineLibraryNotebook `json:"notebook,omitempty"`
}

type ResourcePipeline struct {
  AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
  Catalog string `json:"catalog,omitempty"`
  Channel string `json:"channel,omitempty"`
  Configuration map[string]string `json:"configuration,omitempty"`
  Continuous bool `json:"continuous,omitempty"`
  Development bool `json:"development,omitempty"`
  Edition string `json:"edition,omitempty"`
  Id string `json:"id,omitempty"`
  Name string `json:"name,omitempty"`
  Photon bool `json:"photon,omitempty"`
  Storage string `json:"storage,omitempty"`
  Target string `json:"target,omitempty"`
  Url string `json:"url,omitempty"`
  Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
  Filters *ResourcePipelineFilters `json:"filters,omitempty"`
  Library []ResourcePipelineLibrary `json:"library,omitempty"`
type ResourcePipelineNotification struct {
  Alerts []string `json:"alerts"`
  EmailRecipients []string `json:"email_recipients"`
}

type ResourcePipeline struct {
  AllowDuplicateNames bool `json:"allow_duplicate_names,omitempty"`
  Catalog string `json:"catalog,omitempty"`
  Channel string `json:"channel,omitempty"`
  Configuration map[string]string `json:"configuration,omitempty"`
  Continuous bool `json:"continuous,omitempty"`
  Development bool `json:"development,omitempty"`
  Edition string `json:"edition,omitempty"`
  Id string `json:"id,omitempty"`
  Name string `json:"name,omitempty"`
  Photon bool `json:"photon,omitempty"`
  Serverless bool `json:"serverless,omitempty"`
  Storage string `json:"storage,omitempty"`
  Target string `json:"target,omitempty"`
  Url string `json:"url,omitempty"`
  Cluster []ResourcePipelineCluster `json:"cluster,omitempty"`
  Filters *ResourcePipelineFilters `json:"filters,omitempty"`
  Library []ResourcePipelineLibrary `json:"library,omitempty"`
  Notification []ResourcePipelineNotification `json:"notification,omitempty"`
}

@@ -0,0 +1,26 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type ResourceSqlTableColumn struct {
  Comment string `json:"comment,omitempty"`
  Name string `json:"name"`
  Nullable bool `json:"nullable,omitempty"`
  Type string `json:"type"`
}

type ResourceSqlTable struct {
  CatalogName string `json:"catalog_name"`
  ClusterId string `json:"cluster_id,omitempty"`
  Comment string `json:"comment,omitempty"`
  DataSourceFormat string `json:"data_source_format,omitempty"`
  Id string `json:"id,omitempty"`
  Name string `json:"name"`
  Properties map[string]string `json:"properties,omitempty"`
  SchemaName string `json:"schema_name"`
  StorageCredentialName string `json:"storage_credential_name,omitempty"`
  StorageLocation string `json:"storage_location,omitempty"`
  TableType string `json:"table_type"`
  ViewDefinition string `json:"view_definition,omitempty"`
  Column []ResourceSqlTableColumn `json:"column,omitempty"`
}

@@ -32,6 +32,7 @@ type ResourceStorageCredential struct {
  MetastoreId string `json:"metastore_id,omitempty"`
  Name string `json:"name"`
  Owner string `json:"owner,omitempty"`
  ReadOnly bool `json:"read_only,omitempty"`
  AwsIamRole *ResourceStorageCredentialAwsIamRole `json:"aws_iam_role,omitempty"`
  AzureManagedIdentity *ResourceStorageCredentialAzureManagedIdentity `json:"azure_managed_identity,omitempty"`
  AzureServicePrincipal *ResourceStorageCredentialAzureServicePrincipal `json:"azure_service_principal,omitempty"`

@@ -0,0 +1,14 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type ResourceVolume struct {
  CatalogName string `json:"catalog_name"`
  Comment string `json:"comment,omitempty"`
  Id string `json:"id,omitempty"`
  Name string `json:"name"`
  Owner string `json:"owner,omitempty"`
  SchemaName string `json:"schema_name"`
  StorageLocation string `json:"storage_location,omitempty"`
  VolumeType string `json:"volume_type"`
}

@@ -0,0 +1,13 @@
// Generated from Databricks Terraform provider schema. DO NOT EDIT.

package schema

type ResourceWorkspaceFile struct {
  ContentBase64 string `json:"content_base64,omitempty"`
  Id string `json:"id,omitempty"`
  Md5 string `json:"md5,omitempty"`
  ObjectId int `json:"object_id,omitempty"`
  Path string `json:"path"`
  Source string `json:"source,omitempty"`
  Url string `json:"url,omitempty"`
}

@@ -65,6 +65,7 @@ type Resources struct {
  SqlGlobalConfig map[string]*ResourceSqlGlobalConfig `json:"databricks_sql_global_config,omitempty"`
  SqlPermissions map[string]*ResourceSqlPermissions `json:"databricks_sql_permissions,omitempty"`
  SqlQuery map[string]*ResourceSqlQuery `json:"databricks_sql_query,omitempty"`
  SqlTable map[string]*ResourceSqlTable `json:"databricks_sql_table,omitempty"`
  SqlVisualization map[string]*ResourceSqlVisualization `json:"databricks_sql_visualization,omitempty"`
  SqlWidget map[string]*ResourceSqlWidget `json:"databricks_sql_widget,omitempty"`
  StorageCredential map[string]*ResourceStorageCredential `json:"databricks_storage_credential,omitempty"`
@@ -73,7 +74,9 @@ type Resources struct {
  User map[string]*ResourceUser `json:"databricks_user,omitempty"`
  UserInstanceProfile map[string]*ResourceUserInstanceProfile `json:"databricks_user_instance_profile,omitempty"`
  UserRole map[string]*ResourceUserRole `json:"databricks_user_role,omitempty"`
  Volume map[string]*ResourceVolume `json:"databricks_volume,omitempty"`
  WorkspaceConf map[string]*ResourceWorkspaceConf `json:"databricks_workspace_conf,omitempty"`
  WorkspaceFile map[string]*ResourceWorkspaceFile `json:"databricks_workspace_file,omitempty"`
}

func NewResources() *Resources {
@@ -140,6 +143,7 @@ func NewResources() *Resources {
    SqlGlobalConfig: make(map[string]*ResourceSqlGlobalConfig),
    SqlPermissions: make(map[string]*ResourceSqlPermissions),
    SqlQuery: make(map[string]*ResourceSqlQuery),
    SqlTable: make(map[string]*ResourceSqlTable),
    SqlVisualization: make(map[string]*ResourceSqlVisualization),
    SqlWidget: make(map[string]*ResourceSqlWidget),
    StorageCredential: make(map[string]*ResourceStorageCredential),
@@ -148,6 +152,8 @@ func NewResources() *Resources {
    User: make(map[string]*ResourceUser),
    UserInstanceProfile: make(map[string]*ResourceUserInstanceProfile),
    UserRole: make(map[string]*ResourceUserRole),
    Volume: make(map[string]*ResourceVolume),
    WorkspaceConf: make(map[string]*ResourceWorkspaceConf),
    WorkspaceFile: make(map[string]*ResourceWorkspaceFile),
  }
}

@@ -13,42 +13,18 @@ type Mutator interface {
  Name() string

  // Apply mutates the specified bundle object.
  // It may return a list of mutators to apply immediately after this mutator.
  // For example: when processing all configuration files in the tree; each file gets
  // its own mutator instance.
  Apply(context.Context, *Bundle) ([]Mutator, error)
  Apply(context.Context, *Bundle) error
}

// applyMutator calls apply on the specified mutator given a bundle.
// Any mutators this call returns are applied recursively.
func applyMutator(ctx context.Context, b *Bundle, m Mutator) error {
func Apply(ctx context.Context, b *Bundle, m Mutator) error {
  ctx = log.NewContext(ctx, log.GetLogger(ctx).With("mutator", m.Name()))

  log.Debugf(ctx, "Apply")
  ms, err := m.Apply(ctx, b)
  err := m.Apply(ctx, b)
  if err != nil {
    log.Errorf(ctx, "Error: %s", err)
    return err
  }

  // Apply recursively.
  err = Apply(ctx, b, ms)
  if err != nil {
    return err
  }

  return nil
}

func Apply(ctx context.Context, b *Bundle, ms []Mutator) error {
  if len(ms) == 0 {
    return nil
  }
  for _, m := range ms {
    err := applyMutator(ctx, b, m)
    if err != nil {
      return err
    }
  }
  return nil
}

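Note: the interface change above is the crux of this commit — Apply no longer returns follow-up mutators for the framework to run recursively; composition moves into explicit combinators. A minimal sketch of a mutator under the new contract (the type name is illustrative, not from the commit):

  type noop struct{}

  func (n *noop) Name() string { return "noop" }

  // Under the new contract a mutator does its work and returns only an error.
  // Follow-up work is composed explicitly by the caller, e.g. with Seq,
  // instead of being returned for recursive application.
  func (n *noop) Apply(ctx context.Context, b *Bundle) error {
    return nil
  }

  // Usage: err := Apply(ctx, b, Seq(&noop{}, &noop{}))
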
@@ -16,9 +16,9 @@ func (t *testMutator) Name() string {
  return "test"
}

func (t *testMutator) Apply(_ context.Context, b *Bundle) ([]Mutator, error) {
func (t *testMutator) Apply(ctx context.Context, b *Bundle) error {
  t.applyCalled++
  return t.nestedMutators, nil
  return Apply(ctx, b, Seq(t.nestedMutators...))
}

func TestMutator(t *testing.T) {
@@ -35,7 +35,7 @@ func TestMutator(t *testing.T) {
}

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, []Mutator{m})
  err := Apply(context.Background(), bundle, m)
  assert.NoError(t, err)

  assert.Equal(t, 1, m.applyCalled)

@@ -10,21 +10,24 @@ import (

// The deploy phase deploys artifacts and resources.
func Deploy() bundle.Mutator {
  deployPhase := bundle.Defer([]bundle.Mutator{
  deployMutator := bundle.Seq(
    lock.Acquire(),
    files.Upload(),
    artifacts.UploadAll(),
    terraform.Interpolate(),
    terraform.Write(),
    terraform.StatePull(),
    terraform.Apply(),
    terraform.StatePush(),
  }, []bundle.Mutator{
    lock.Release(),
  })
    bundle.Defer(
      bundle.Seq(
        files.Upload(),
        artifacts.UploadAll(),
        terraform.Interpolate(),
        terraform.Write(),
        terraform.StatePull(),
        terraform.Apply(),
        terraform.StatePush(),
      ),
      lock.Release(lock.GoalDeploy),
    ),
  )

  return newPhase(
    "deploy",
    deployPhase,
    []bundle.Mutator{deployMutator},
  )
}

@@ -9,19 +9,23 @@ import (

// The destroy phase deletes artifacts and resources.
func Destroy() bundle.Mutator {
  destroyPhase := bundle.Defer([]bundle.Mutator{

  destroyMutator := bundle.Seq(
    lock.Acquire(),
    terraform.StatePull(),
    terraform.Plan(terraform.PlanGoal("destroy")),
    terraform.Destroy(),
    terraform.StatePush(),
    files.Delete(),
  }, []bundle.Mutator{
    lock.Release(),
  })
    bundle.Defer(
      bundle.Seq(
        terraform.StatePull(),
        terraform.Plan(terraform.PlanGoal("destroy")),
        terraform.Destroy(),
        terraform.StatePush(),
        files.Delete(),
      ),
      lock.Release(lock.GoalDestroy),
    ),
  )

  return newPhase(
    "destroy",
    destroyPhase,
    []bundle.Mutator{destroyMutator},
  )
}

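Note: both phases above now share one shape — take the deployment lock first, then run the real work inside Defer so the release runs whether or not the work fails. Schematically (workA and workB are placeholder mutators, not names from the commit):

  phase := bundle.Seq(
    lock.Acquire(),
    bundle.Defer(
      bundle.Seq(workA, workB),      // the phase body
      lock.Release(lock.GoalDeploy), // runs even if the body errors
    ),
  )
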
@@ -26,7 +26,7 @@ func (p *phase) Name() string {
  return p.name
}

func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) ([]bundle.Mutator, error) {
func (p *phase) Apply(ctx context.Context, b *bundle.Bundle) error {
  log.Infof(ctx, "Phase: %s", p.Name())
  return p.mutators, nil
  return bundle.Apply(ctx, b, bundle.Seq(p.mutators...))
}

@@ -12,7 +12,6 @@ import (
  "github.com/databricks/cli/bundle/run/progress"
  "github.com/databricks/cli/libs/cmdio"
  "github.com/databricks/cli/libs/log"
  "github.com/databricks/databricks-sdk-go/retries"
  "github.com/databricks/databricks-sdk-go/service/jobs"
  "github.com/fatih/color"
  flag "github.com/spf13/pflag"
@@ -145,27 +144,17 @@ func (r *jobRunner) logFailedTasks(ctx context.Context, runId int64) {
  }
}

func pullRunIdCallback(runId *int64) func(info *retries.Info[jobs.Run]) {
  return func(info *retries.Info[jobs.Run]) {
    i := info.Info
    if i == nil {
      return
    }

func pullRunIdCallback(runId *int64) func(info *jobs.Run) {
  return func(i *jobs.Run) {
    if *runId == 0 {
      *runId = i.RunId
    }
  }
}

func logDebugCallback(ctx context.Context, runId *int64) func(info *retries.Info[jobs.Run]) {
func logDebugCallback(ctx context.Context, runId *int64) func(info *jobs.Run) {
  var prevState *jobs.RunState
  return func(info *retries.Info[jobs.Run]) {
    i := info.Info
    if i == nil {
      return
    }

  return func(i *jobs.Run) {
    state := i.State
    if state == nil {
      return
@@ -173,23 +162,18 @@ func logDebugCallback(ctx context.Context, runId *int64) func(info *retries.Info

    // Log the job run URL as soon as it is available.
    if prevState == nil {
      log.Infof(ctx, "Run available at %s", info.Info.RunPageUrl)
      log.Infof(ctx, "Run available at %s", i.RunPageUrl)
    }
    if prevState == nil || prevState.LifeCycleState != state.LifeCycleState {
      log.Infof(ctx, "Run status: %s", info.Info.State.LifeCycleState)
      log.Infof(ctx, "Run status: %s", i.State.LifeCycleState)
      prevState = state
    }
  }
}

func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *retries.Info[jobs.Run]) {
func logProgressCallback(ctx context.Context, progressLogger *cmdio.Logger) func(info *jobs.Run) {
  var prevState *jobs.RunState
  return func(info *retries.Info[jobs.Run]) {
    i := info.Info
    if i == nil {
      return
    }

  return func(i *jobs.Run) {
    state := i.State
    if state == nil {
      return
@@ -255,8 +239,15 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e
  }
  logProgress := logProgressCallback(ctx, progressLogger)

  run, err := w.Jobs.RunNowAndWait(ctx, *req,
    retries.Timeout[jobs.Run](jobRunTimeout), pullRunId, logDebug, logProgress)
  waiter, err := w.Jobs.RunNow(ctx, *req)
  if err != nil {
    return nil, fmt.Errorf("cannot start job")
  }
  run, err := waiter.OnProgress(func(r *jobs.Run) {
    pullRunId(r)
    logDebug(r)
    logProgress(r)
  }).GetWithTimeout(jobRunTimeout)
  if err != nil && runId != nil {
    r.logFailedTasks(ctx, *runId)
  }

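Note: the job runner moves off the SDK's retries.Info-wrapped callbacks onto the waiter returned by RunNow; progress callbacks now receive a typed *jobs.Run directly. A condensed sketch of the new call shape, using only the calls that appear in the diff above:

  waiter, err := w.Jobs.RunNow(ctx, *req)
  if err != nil {
    return nil, err
  }
  run, err := waiter.OnProgress(func(r *jobs.Run) {
    // r is the typed run object; no retries.Info wrapper to unwrap or nil-check.
    if r.State != nil {
      log.Infof(ctx, "state: %s", r.State.LifeCycleState)
    }
  }).GetWithTimeout(jobRunTimeout)
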
@@ -0,0 +1,25 @@
package bundle

import "context"

type seqMutator struct {
  mutators []Mutator
}

func (s *seqMutator) Name() string {
  return "seq"
}

func (s *seqMutator) Apply(ctx context.Context, b *Bundle) error {
  for _, m := range s.mutators {
    err := Apply(ctx, b, m)
    if err != nil {
      return err
    }
  }
  return nil
}

func Seq(ms ...Mutator) Mutator {
  return &seqMutator{mutators: ms}
}

@@ -0,0 +1,91 @@
package bundle

import (
  "context"
  "testing"

  "github.com/stretchr/testify/assert"
)

func TestSeqMutator(t *testing.T) {
  m1 := &testMutator{}
  m2 := &testMutator{}
  m3 := &testMutator{}
  seqMutator := Seq(m1, m2, m3)

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, seqMutator)
  assert.NoError(t, err)

  assert.Equal(t, 1, m1.applyCalled)
  assert.Equal(t, 1, m2.applyCalled)
  assert.Equal(t, 1, m3.applyCalled)
}

func TestSeqWithDeferredMutator(t *testing.T) {
  m1 := &testMutator{}
  m2 := &testMutator{}
  m3 := &testMutator{}
  m4 := &testMutator{}
  seqMutator := Seq(m1, Defer(m2, m3), m4)

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, seqMutator)
  assert.NoError(t, err)

  assert.Equal(t, 1, m1.applyCalled)
  assert.Equal(t, 1, m2.applyCalled)
  assert.Equal(t, 1, m3.applyCalled)
  assert.Equal(t, 1, m4.applyCalled)
}

func TestSeqWithErrorAndDeferredMutator(t *testing.T) {
  errorMut := &mutatorWithError{errorMsg: "error msg"}
  m1 := &testMutator{}
  m2 := &testMutator{}
  m3 := &testMutator{}
  seqMutator := Seq(errorMut, Defer(m1, m2), m3)

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, seqMutator)
  assert.Error(t, err)

  assert.Equal(t, 1, errorMut.applyCalled)
  assert.Equal(t, 0, m1.applyCalled)
  assert.Equal(t, 0, m2.applyCalled)
  assert.Equal(t, 0, m3.applyCalled)
}

func TestSeqWithErrorInsideDeferredMutator(t *testing.T) {
  errorMut := &mutatorWithError{errorMsg: "error msg"}
  m1 := &testMutator{}
  m2 := &testMutator{}
  m3 := &testMutator{}
  seqMutator := Seq(m1, Defer(errorMut, m2), m3)

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, seqMutator)
  assert.Error(t, err)

  assert.Equal(t, 1, m1.applyCalled)
  assert.Equal(t, 1, errorMut.applyCalled)
  assert.Equal(t, 1, m2.applyCalled)
  assert.Equal(t, 0, m3.applyCalled)
}

func TestSeqWithErrorInsideFinallyStage(t *testing.T) {
  errorMut := &mutatorWithError{errorMsg: "error msg"}
  m1 := &testMutator{}
  m2 := &testMutator{}
  m3 := &testMutator{}
  seqMutator := Seq(m1, Defer(m2, errorMut), m3)

  bundle := &Bundle{}
  err := Apply(context.Background(), bundle, seqMutator)
  assert.Error(t, err)

  assert.Equal(t, 1, m1.applyCalled)
  assert.Equal(t, 1, m2.applyCalled)
  assert.Equal(t, 1, errorMut.applyCalled)
  assert.Equal(t, 0, m3.applyCalled)
}

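Note: Defer itself is not shown in this diff, but the tests above pin down its contract — the finally mutator runs whether or not the body errors, and either error fails the whole mutator. A hypothetical implementation consistent with these tests (not the commit's actual code):

  type deferredMutator struct {
    mutator Mutator
    finally Mutator
  }

  func (d *deferredMutator) Name() string { return "deferred" }

  func (d *deferredMutator) Apply(ctx context.Context, b *Bundle) error {
    mainErr := Apply(ctx, b, d.mutator)
    // The cleanup mutator runs regardless of the main mutator's outcome.
    cleanupErr := Apply(ctx, b, d.finally)
    if mainErr != nil {
      return mainErr
    }
    return cleanupErr
  }

  func Defer(m Mutator, finally Mutator) Mutator {
    return &deferredMutator{mutator: m, finally: finally}
  }
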
@@ -21,7 +21,7 @@ func TestConflictingResourceIdsNoSubconfig(t *testing.T) {
func TestConflictingResourceIdsOneSubconfig(t *testing.T) {
  b, err := bundle.Load("./conflicting_resource_ids/one_subconfiguration")
  require.NoError(t, err)
  err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
  err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
  bundleConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/bundle.yml")
  resourcesConfigPath := filepath.FromSlash("conflicting_resource_ids/one_subconfiguration/resources.yml")
  assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", bundleConfigPath, resourcesConfigPath))
@@ -30,7 +30,7 @@ func TestConflictingResourceIdsOneSubconfig(t *testing.T) {
func TestConflictingResourceIdsTwoSubconfigs(t *testing.T) {
  b, err := bundle.Load("./conflicting_resource_ids/two_subconfigurations")
  require.NoError(t, err)
  err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
  err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
  resources1ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources1.yml")
  resources2ConfigPath := filepath.FromSlash("conflicting_resource_ids/two_subconfigurations/resources2.yml")
  assert.ErrorContains(t, err, fmt.Sprintf("multiple resources named foo (job at %s, pipeline at %s)", resources1ConfigPath, resources2ConfigPath))

@@ -0,0 +1,5 @@
bundle:
  name: environment_empty

environments:
  development:

@@ -0,0 +1,12 @@
package config_tests

import (
  "testing"

  "github.com/stretchr/testify/assert"
)

func TestEnvironmentEmpty(t *testing.T) {
  b := loadEnvironment(t, "./environment_empty", "development")
  assert.Equal(t, "development", b.Config.Bundle.Environment)
}

@@ -12,11 +12,10 @@ import (

func TestInterpolation(t *testing.T) {
  b := load(t, "./interpolation")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath("bundle"),
      interpolation.IncludeLookupsInPath("workspace"),
    )})
  err := bundle.Apply(context.Background(), b, interpolation.Interpolate(
    interpolation.IncludeLookupsInPath("bundle"),
    interpolation.IncludeLookupsInPath("workspace"),
  ))
  require.NoError(t, err)
  assert.Equal(t, "foo bar", b.Config.Bundle.Name)
  assert.Equal(t, "foo bar | bar", b.Config.Resources.Jobs["my_job"].Name)

@@ -12,14 +12,14 @@ import (
func load(t *testing.T, path string) *bundle.Bundle {
  b, err := bundle.Load(path)
  require.NoError(t, err)
  err = bundle.Apply(context.Background(), b, mutator.DefaultMutators())
  err = bundle.Apply(context.Background(), b, bundle.Seq(mutator.DefaultMutators()...))
  require.NoError(t, err)
  return b
}

func loadEnvironment(t *testing.T, path, env string) *bundle.Bundle {
  b := load(t, path)
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{mutator.SelectEnvironment(env)})
  err := bundle.Apply(context.Background(), b, mutator.SelectEnvironment(env))
  require.NoError(t, err)
  return b
}

@@ -15,45 +15,45 @@ import (
func TestVariables(t *testing.T) {
  t.Setenv("BUNDLE_VAR_b", "def")
  b := load(t, "./variables/vanilla")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  require.NoError(t, err)
  assert.Equal(t, "abc def", b.Config.Bundle.Name)
}

func TestVariablesLoadingFailsWhenRequiredVariableIsNotSpecified(t *testing.T) {
  b := load(t, "./variables/vanilla")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable")
}

func TestVariablesEnvironmentsBlockOverride(t *testing.T) {
  b := load(t, "./variables/env_overrides")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SelectEnvironment("env-with-single-variable-override"),
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  require.NoError(t, err)
  assert.Equal(t, "default-a dev-b", b.Config.Workspace.Profile)
}

func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) {
  b := load(t, "./variables/env_overrides")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SelectEnvironment("env-with-two-variable-overrides"),
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  require.NoError(t, err)
  assert.Equal(t, "prod-a prod-b", b.Config.Workspace.Profile)
}
@@ -61,34 +61,34 @@ func TestVariablesEnvironmentsBlockOverrideForMultipleVariables(t *testing.T) {
func TestVariablesEnvironmentsBlockOverrideWithProcessEnvVars(t *testing.T) {
  t.Setenv("BUNDLE_VAR_b", "env-var-b")
  b := load(t, "./variables/env_overrides")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SelectEnvironment("env-with-two-variable-overrides"),
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  require.NoError(t, err)
  assert.Equal(t, "prod-a env-var-b", b.Config.Workspace.Profile)
}

func TestVariablesEnvironmentsBlockOverrideWithMissingVariables(t *testing.T) {
  b := load(t, "./variables/env_overrides")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SelectEnvironment("env-missing-a-required-variable-assignment"),
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  assert.ErrorContains(t, err, "no value assigned to required variable b. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_b environment variable")
}

func TestVariablesEnvironmentsBlockOverrideWithUndefinedVariables(t *testing.T) {
  b := load(t, "./variables/env_overrides")
  err := bundle.Apply(context.Background(), b, []bundle.Mutator{
  err := bundle.Apply(context.Background(), b, bundle.Seq(
    mutator.SelectEnvironment("env-using-an-undefined-variable"),
    mutator.SetVariables(),
    interpolation.Interpolate(
      interpolation.IncludeLookupsInPath(variable.VariableReferencePrefix),
    )})
  )))
  assert.ErrorContains(t, err, "variable c is not defined but is assigned a value")
}

@@ -0,0 +1,179 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.

package access_control

import (
  "fmt"

  "github.com/databricks/cli/cmd/root"
  "github.com/databricks/cli/libs/cmdio"
  "github.com/databricks/cli/libs/flags"
  "github.com/databricks/databricks-sdk-go/service/iam"
  "github.com/spf13/cobra"
)

var Cmd = &cobra.Command{
  Use: "access-control",
  Short: `These APIs manage access rules on resources in an account.`,
  Long: `These APIs manage access rules on resources in an account. Currently, only
  grant rules are supported. A grant rule specifies a role assigned to a set of
  principals. A list of rules attached to a resource is called a rule set.`,
  Annotations: map[string]string{
    "package": "iam",
  },
}

// start get-assignable-roles-for-resource command

var getAssignableRolesForResourceReq iam.GetAssignableRolesForResourceRequest
var getAssignableRolesForResourceJson flags.JsonFlag

func init() {
  Cmd.AddCommand(getAssignableRolesForResourceCmd)
  // TODO: short flags
  getAssignableRolesForResourceCmd.Flags().Var(&getAssignableRolesForResourceJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

var getAssignableRolesForResourceCmd = &cobra.Command{
  Use: "get-assignable-roles-for-resource RESOURCE",
  Short: `Get assignable roles for a resource.`,
  Long: `Get assignable roles for a resource.

  Gets all the roles that can be granted on an account level resource. A role is
  grantable if the rule set on the resource can contain an access rule of the
  role.`,

  Annotations: map[string]string{},
  Args: func(cmd *cobra.Command, args []string) error {
    check := cobra.ExactArgs(1)
    if cmd.Flags().Changed("json") {
      check = cobra.ExactArgs(0)
    }
    return check(cmd, args)
  },
  PreRunE: root.MustAccountClient,
  RunE: func(cmd *cobra.Command, args []string) (err error) {
    ctx := cmd.Context()
    a := root.AccountClient(ctx)
    if cmd.Flags().Changed("json") {
      err = getAssignableRolesForResourceJson.Unmarshal(&getAssignableRolesForResourceReq)
      if err != nil {
        return err
      }
    } else {
      getAssignableRolesForResourceReq.Resource = args[0]
    }

    response, err := a.AccessControl.GetAssignableRolesForResource(ctx, getAssignableRolesForResourceReq)
    if err != nil {
      return err
    }
    return cmdio.Render(ctx, response)
  },
  // Disable completions since they are not applicable.
  // Can be overridden by manual implementation in `override.go`.
  ValidArgsFunction: cobra.NoFileCompletions,
}

// start get-rule-set command

var getRuleSetReq iam.GetRuleSetRequest
var getRuleSetJson flags.JsonFlag

func init() {
  Cmd.AddCommand(getRuleSetCmd)
  // TODO: short flags
  getRuleSetCmd.Flags().Var(&getRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

var getRuleSetCmd = &cobra.Command{
  Use: "get-rule-set NAME ETAG",
  Short: `Get a rule set.`,
  Long: `Get a rule set.

  Get a rule set by its name. A rule set is always attached to a resource and
  contains a list of access rules on the said resource. Currently only a default
  rule set for each resource is supported.`,

  Annotations: map[string]string{},
  Args: func(cmd *cobra.Command, args []string) error {
    check := cobra.ExactArgs(2)
    if cmd.Flags().Changed("json") {
      check = cobra.ExactArgs(0)
    }
    return check(cmd, args)
  },
  PreRunE: root.MustAccountClient,
  RunE: func(cmd *cobra.Command, args []string) (err error) {
    ctx := cmd.Context()
    a := root.AccountClient(ctx)
    if cmd.Flags().Changed("json") {
      err = getRuleSetJson.Unmarshal(&getRuleSetReq)
      if err != nil {
        return err
      }
    } else {
      getRuleSetReq.Name = args[0]
      getRuleSetReq.Etag = args[1]
    }

    response, err := a.AccessControl.GetRuleSet(ctx, getRuleSetReq)
    if err != nil {
      return err
    }
    return cmdio.Render(ctx, response)
  },
  // Disable completions since they are not applicable.
  // Can be overridden by manual implementation in `override.go`.
  ValidArgsFunction: cobra.NoFileCompletions,
}

// start update-rule-set command

var updateRuleSetReq iam.UpdateRuleSetRequest
var updateRuleSetJson flags.JsonFlag

func init() {
  Cmd.AddCommand(updateRuleSetCmd)
  // TODO: short flags
  updateRuleSetCmd.Flags().Var(&updateRuleSetJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

var updateRuleSetCmd = &cobra.Command{
  Use: "update-rule-set",
  Short: `Update a rule set.`,
  Long: `Update a rule set.

  Replace the rules of a rule set. First, use get to read the current version of
  the rule set before modifying it. This pattern helps prevent conflicts between
  concurrent updates.`,

  Annotations: map[string]string{},
  PreRunE: root.MustAccountClient,
  RunE: func(cmd *cobra.Command, args []string) (err error) {
    ctx := cmd.Context()
    a := root.AccountClient(ctx)
    if cmd.Flags().Changed("json") {
      err = updateRuleSetJson.Unmarshal(&updateRuleSetReq)
      if err != nil {
        return err
      }
    } else {
      return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
    }

    response, err := a.AccessControl.UpdateRuleSet(ctx, updateRuleSetReq)
    if err != nil {
      return err
    }
    return cmdio.Render(ctx, response)
  },
  // Disable completions since they are not applicable.
  // Can be overridden by manual implementation in `override.go`.
  ValidArgsFunction: cobra.NoFileCompletions,
}

// end service AccountAccessControl

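Note: taken together, the generated command above should yield invocations like the following (a sketch; values are placeholders, and it assumes the root command is `databricks` with account services nested under `account`, consistent with the account command registration in this change):

  databricks account access-control get-assignable-roles-for-resource <RESOURCE>
  databricks account access-control get-rule-set <NAME> <ETAG>
  databricks account access-control update-rule-set --json @rule-set.json
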
@@ -4,6 +4,7 @@ package billable_usage

import (
  "github.com/databricks/cli/cmd/root"
  "github.com/databricks/cli/libs/flags"
  "github.com/databricks/databricks-sdk-go/service/billing"
  "github.com/spf13/cobra"
)
@@ -13,15 +14,20 @@ var Cmd = &cobra.Command{
  Short: `This API allows you to download billable usage logs for the specified account and date range.`,
  Long: `This API allows you to download billable usage logs for the specified account
  and date range. This feature works with all account types.`,
  Annotations: map[string]string{
    "package": "billing",
  },
}

// start download command

var downloadReq billing.DownloadRequest
var downloadJson flags.JsonFlag

func init() {
  Cmd.AddCommand(downloadCmd)
  // TODO: short flags
  downloadCmd.Flags().Var(&downloadJson, "json", `either inline JSON string or @path/to/file.json with request body`)

  downloadCmd.Flags().BoolVar(&downloadReq.PersonalData, "personal-data", downloadReq.PersonalData, `Specify whether to include personally identifiable information in the billable usage logs, for example the email addresses of cluster creators.`)

@ -39,13 +45,26 @@ var downloadCmd = &cobra.Command{
|
|||
[CSV file schema]: https://docs.databricks.com/administration-guide/account-settings/usage-analysis.html#schema`,
|
||||
|
||||
Annotations: map[string]string{},
|
||||
Args: cobra.ExactArgs(2),
|
||||
PreRunE: root.MustAccountClient,
|
||||
Args: func(cmd *cobra.Command, args []string) error {
|
||||
check := cobra.ExactArgs(2)
|
||||
if cmd.Flags().Changed("json") {
|
||||
check = cobra.ExactArgs(0)
|
||||
}
|
||||
return check(cmd, args)
|
||||
},
|
||||
PreRunE: root.MustAccountClient,
|
||||
RunE: func(cmd *cobra.Command, args []string) (err error) {
|
||||
ctx := cmd.Context()
|
||||
a := root.AccountClient(ctx)
|
||||
downloadReq.StartMonth = args[0]
|
||||
downloadReq.EndMonth = args[1]
|
||||
if cmd.Flags().Changed("json") {
|
||||
err = downloadJson.Unmarshal(&downloadReq)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
downloadReq.StartMonth = args[0]
|
||||
downloadReq.EndMonth = args[1]
|
||||
}
|
||||
|
||||
err = a.BillableUsage.Download(ctx, downloadReq)
|
||||
if err != nil {
|
||||
|
@ -53,6 +72,9 @@ var downloadCmd = &cobra.Command{
|
|||
}
|
||||
return nil
|
||||
},
|
||||
// Disable completions since they are not applicable.
|
||||
// Can be overridden by manual implementation in `override.go`.
|
||||
ValidArgsFunction: cobra.NoFileCompletions,
|
||||
}
|
||||
|
||||
// end service BillableUsage
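
Every command in these files registers the same --json flag, documented as either an inline JSON string or @path/to/file.json with the request body. The real implementation is flags.JsonFlag from the CLI's libs/flags package; the following is only a rough standalone approximation built from that documented behavior:

package main

import (
    "encoding/json"
    "fmt"
    "os"
    "strings"
)

// jsonFlag satisfies the pflag.Value interface (String, Set, Type). Set
// stores either the inline JSON text or, when the value starts with '@',
// the contents of the named file.
type jsonFlag struct {
    raw []byte
}

func (j *jsonFlag) String() string { return string(j.raw) }

func (j *jsonFlag) Set(v string) error {
    if strings.HasPrefix(v, "@") {
        buf, err := os.ReadFile(strings.TrimPrefix(v, "@"))
        if err != nil {
            return err
        }
        j.raw = buf
        return nil
    }
    j.raw = []byte(v)
    return nil
}

func (j *jsonFlag) Type() string { return "JSON" }

// Unmarshal decodes the captured bytes into a request struct, mirroring
// how the generated RunE bodies call downloadJson.Unmarshal(&downloadReq).
func (j *jsonFlag) Unmarshal(v any) error {
    return json.Unmarshal(j.raw, v)
}

func main() {
    var f jsonFlag
    _ = f.Set(`{"start_month": "2023-01", "end_month": "2023-03"}`)
    var req struct {
        StartMonth string `json:"start_month"`
        EndMonth   string `json:"end_month"`
    }
    if err := f.Unmarshal(&req); err != nil {
        panic(err)
    }
    fmt.Println(req.StartMonth, "->", req.EndMonth)
}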

@ -17,6 +17,12 @@ var Cmd = &cobra.Command{
    Short: `These APIs manage budget configuration including notifications for exceeding a budget for a period.`,
    Long: `These APIs manage budget configuration including notifications for exceeding a
  budget for a period. They can also retrieve the status of each budget.`,
    Annotations: map[string]string{
        "package": "billing",
    },

    // This service is being previewed; hide from help output.
    Hidden: true,
}

// start create command

@ -43,15 +49,14 @@ var createCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = createJson.Unmarshal(&createReq)
        if err != nil {
            return err
        if cmd.Flags().Changed("json") {
            err = createJson.Unmarshal(&createReq)
            if err != nil {
                return err
            }
        } else {
            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
        }
        _, err = fmt.Sscan(args[0], &createReq.Budget)
        if err != nil {
            return fmt.Errorf("invalid BUDGET: %s", args[0])
        }
        createReq.BudgetId = args[1]

        response, err := a.Budgets.Create(ctx, createReq)
        if err != nil {

@ -59,15 +64,20 @@ var createCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start delete command

var deleteReq billing.DeleteBudgetRequest
var deleteJson flags.JsonFlag

func init() {
    Cmd.AddCommand(deleteCmd)
    // TODO: short flags
    deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -83,21 +93,31 @@ var deleteCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = deleteJson.Unmarshal(&deleteReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Budget ID")
            if err != nil {
                return err
        } else {
            if len(args) == 0 {
                promptSpinner := cmdio.Spinner(ctx)
                promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
                names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
                close(promptSpinner)
                if err != nil {
                    return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
                }
                id, err := cmdio.Select(ctx, names, "Budget ID")
                if err != nil {
                    return err
                }
                args = append(args, id)
            }
            args = append(args, id)
            if len(args) != 1 {
                return fmt.Errorf("expected to have budget id")
            }
            deleteReq.BudgetId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have budget id")
        }
        deleteReq.BudgetId = args[0]

        err = a.Budgets.Delete(ctx, deleteReq)
        if err != nil {

@ -105,15 +125,20 @@ var deleteCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start get command

var getReq billing.GetBudgetRequest
var getJson flags.JsonFlag

func init() {
    Cmd.AddCommand(getCmd)
    // TODO: short flags
    getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -130,21 +155,31 @@ var getCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = getJson.Unmarshal(&getReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Budget ID")
            if err != nil {
                return err
        } else {
            if len(args) == 0 {
                promptSpinner := cmdio.Spinner(ctx)
                promptSpinner <- "No BUDGET_ID argument specified. Loading names for Budgets drop-down."
                names, err := a.Budgets.BudgetWithStatusNameToBudgetIdMap(ctx)
                close(promptSpinner)
                if err != nil {
                    return fmt.Errorf("failed to load names for Budgets drop-down. Please manually specify required arguments. Original error: %w", err)
                }
                id, err := cmdio.Select(ctx, names, "Budget ID")
                if err != nil {
                    return err
                }
                args = append(args, id)
            }
            args = append(args, id)
            if len(args) != 1 {
                return fmt.Errorf("expected to have budget id")
            }
            getReq.BudgetId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have budget id")
        }
        getReq.BudgetId = args[0]

        response, err := a.Budgets.Get(ctx, getReq)
        if err != nil {

@ -152,6 +187,9 @@ var getCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start list command

@ -180,6 +218,9 @@ var listCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start update command

@ -207,15 +248,14 @@ var updateCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = updateJson.Unmarshal(&updateReq)
        if err != nil {
            return err
        if cmd.Flags().Changed("json") {
            err = updateJson.Unmarshal(&updateReq)
            if err != nil {
                return err
            }
        } else {
            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
        }
        _, err = fmt.Sscan(args[0], &updateReq.Budget)
        if err != nil {
            return fmt.Errorf("invalid BUDGET: %s", args[0])
        }
        updateReq.BudgetId = args[1]

        err = a.Budgets.Update(ctx, updateReq)
        if err != nil {

@ -223,6 +263,9 @@ var updateCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// end service Budgets
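
The delete and get bodies above add an interactive fallback: when no ID argument is given and --json is absent, they push a message to a spinner channel, load a name-to-ID map from the API, and offer a drop-down via cmdio.Select. A simplified sketch of that flow with the CLI's cmdio helpers replaced by plain stdin prompting; resolveID and the sample data are hypothetical:

package main

import (
    "bufio"
    "fmt"
    "os"
    "sort"
)

// resolveID mimics the generated fallback: use the positional argument if
// present, otherwise list the known name->ID pairs and prompt for one.
func resolveID(args []string, load func() (map[string]string, error)) (string, error) {
    if len(args) > 0 {
        return args[0], nil
    }
    fmt.Fprintln(os.Stderr, "No ID argument specified. Loading names for drop-down.")
    names, err := load()
    if err != nil {
        return "", fmt.Errorf("failed to load names for drop-down: %w", err)
    }
    keys := make([]string, 0, len(names))
    for k := range names {
        keys = append(keys, k)
    }
    sort.Strings(keys)
    for i, k := range keys {
        fmt.Printf("[%d] %s (%s)\n", i, k, names[k])
    }
    fmt.Print("Select: ")
    var idx int
    if _, err := fmt.Fscan(bufio.NewReader(os.Stdin), &idx); err != nil {
        return "", err
    }
    if idx < 0 || idx >= len(keys) {
        return "", fmt.Errorf("selection out of range")
    }
    return names[keys[idx]], nil
}

func main() {
    id, err := resolveID(os.Args[1:], func() (map[string]string, error) {
        return map[string]string{"monthly": "42", "quarterly": "43"}, nil
    })
    if err != nil {
        panic(err)
    }
    fmt.Println("resolved budget ID:", id)
}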

@ -6,6 +6,7 @@ import (
    "github.com/databricks/cli/cmd/root"
    "github.com/spf13/cobra"

    account_access_control "github.com/databricks/cli/cmd/account/access-control"
    billable_usage "github.com/databricks/cli/cmd/account/billable-usage"
    budgets "github.com/databricks/cli/cmd/account/budgets"
    credentials "github.com/databricks/cli/cmd/account/credentials"

@ -20,7 +21,9 @@ import (
    o_auth_enrollment "github.com/databricks/cli/cmd/account/o-auth-enrollment"
    private_access "github.com/databricks/cli/cmd/account/private-access"
    published_app_integration "github.com/databricks/cli/cmd/account/published-app-integration"
    service_principal_secrets "github.com/databricks/cli/cmd/account/service-principal-secrets"
    account_service_principals "github.com/databricks/cli/cmd/account/service-principals"
    account_settings "github.com/databricks/cli/cmd/account/settings"
    storage "github.com/databricks/cli/cmd/account/storage"
    account_storage_credentials "github.com/databricks/cli/cmd/account/storage-credentials"
    account_users "github.com/databricks/cli/cmd/account/users"

@ -37,6 +40,7 @@ var accountCmd = &cobra.Command{
func init() {
    root.RootCmd.AddCommand(accountCmd)

    accountCmd.AddCommand(account_access_control.Cmd)
    accountCmd.AddCommand(billable_usage.Cmd)
    accountCmd.AddCommand(budgets.Cmd)
    accountCmd.AddCommand(credentials.Cmd)

@ -51,11 +55,40 @@ func init() {
    accountCmd.AddCommand(o_auth_enrollment.Cmd)
    accountCmd.AddCommand(private_access.Cmd)
    accountCmd.AddCommand(published_app_integration.Cmd)
    accountCmd.AddCommand(service_principal_secrets.Cmd)
    accountCmd.AddCommand(account_service_principals.Cmd)
    accountCmd.AddCommand(account_settings.Cmd)
    accountCmd.AddCommand(storage.Cmd)
    accountCmd.AddCommand(account_storage_credentials.Cmd)
    accountCmd.AddCommand(account_users.Cmd)
    accountCmd.AddCommand(vpc_endpoints.Cmd)
    accountCmd.AddCommand(workspace_assignment.Cmd)
    accountCmd.AddCommand(workspaces.Cmd)

    // Register commands with groups
    account_access_control.Cmd.GroupID = "iam"
    billable_usage.Cmd.GroupID = "billing"
    budgets.Cmd.GroupID = "billing"
    credentials.Cmd.GroupID = "provisioning"
    custom_app_integration.Cmd.GroupID = "oauth2"
    encryption_keys.Cmd.GroupID = "provisioning"
    account_groups.Cmd.GroupID = "iam"
    account_ip_access_lists.Cmd.GroupID = "settings"
    log_delivery.Cmd.GroupID = "billing"
    account_metastore_assignments.Cmd.GroupID = "catalog"
    account_metastores.Cmd.GroupID = "catalog"
    networks.Cmd.GroupID = "provisioning"
    o_auth_enrollment.Cmd.GroupID = "oauth2"
    private_access.Cmd.GroupID = "provisioning"
    published_app_integration.Cmd.GroupID = "oauth2"
    service_principal_secrets.Cmd.GroupID = "oauth2"
    account_service_principals.Cmd.GroupID = "iam"
    account_settings.Cmd.GroupID = "settings"
    storage.Cmd.GroupID = "provisioning"
    account_storage_credentials.Cmd.GroupID = "catalog"
    account_users.Cmd.GroupID = "iam"
    vpc_endpoints.Cmd.GroupID = "provisioning"
    workspace_assignment.Cmd.GroupID = "iam"
    workspaces.Cmd.GroupID = "provisioning"

}

@ -20,6 +20,9 @@ var Cmd = &cobra.Command{
  Databricks can deploy clusters in the appropriate VPC for the new workspace. A
  credential configuration encapsulates this role information, and its ID is
  used when creating a new workspace.`,
    Annotations: map[string]string{
        "package": "provisioning",
    },
}

// start create command

@ -59,14 +62,13 @@ var createCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = createJson.Unmarshal(&createReq)
        if err != nil {
            return err
        }
        createReq.CredentialsName = args[0]
        _, err = fmt.Sscan(args[1], &createReq.AwsCredentials)
        if err != nil {
            return fmt.Errorf("invalid AWS_CREDENTIALS: %s", args[1])
        if cmd.Flags().Changed("json") {
            err = createJson.Unmarshal(&createReq)
            if err != nil {
                return err
            }
        } else {
            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
        }

        response, err := a.Credentials.Create(ctx, createReq)

@ -75,15 +77,20 @@ var createCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start delete command

var deleteReq provisioning.DeleteCredentialRequest
var deleteJson flags.JsonFlag

func init() {
    Cmd.AddCommand(deleteCmd)
    // TODO: short flags
    deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -101,21 +108,31 @@ var deleteCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = deleteJson.Unmarshal(&deleteReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
            if err != nil {
                return err
        } else {
            if len(args) == 0 {
                promptSpinner := cmdio.Spinner(ctx)
                promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
                names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
                close(promptSpinner)
                if err != nil {
                    return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
                }
                id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
                if err != nil {
                    return err
                }
                args = append(args, id)
            }
            args = append(args, id)
            if len(args) != 1 {
                return fmt.Errorf("expected to have databricks account api credential configuration id")
            }
            deleteReq.CredentialsId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have databricks account api credential configuration id")
        }
        deleteReq.CredentialsId = args[0]

        err = a.Credentials.Delete(ctx, deleteReq)
        if err != nil {

@ -123,15 +140,20 @@ var deleteCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start get command

var getReq provisioning.GetCredentialRequest
var getJson flags.JsonFlag

func init() {
    Cmd.AddCommand(getCmd)
    // TODO: short flags
    getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -148,21 +170,31 @@ var getCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = getJson.Unmarshal(&getReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
            if err != nil {
                return err
        } else {
            if len(args) == 0 {
                promptSpinner := cmdio.Spinner(ctx)
                promptSpinner <- "No CREDENTIALS_ID argument specified. Loading names for Credentials drop-down."
                names, err := a.Credentials.CredentialCredentialsNameToCredentialsIdMap(ctx)
                close(promptSpinner)
                if err != nil {
                    return fmt.Errorf("failed to load names for Credentials drop-down. Please manually specify required arguments. Original error: %w", err)
                }
                id, err := cmdio.Select(ctx, names, "Databricks Account API credential configuration ID")
                if err != nil {
                    return err
                }
                args = append(args, id)
            }
            args = append(args, id)
            if len(args) != 1 {
                return fmt.Errorf("expected to have databricks account api credential configuration id")
            }
            getReq.CredentialsId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have databricks account api credential configuration id")
        }
        getReq.CredentialsId = args[0]

        response, err := a.Credentials.Get(ctx, getReq)
        if err != nil {

@ -170,6 +202,9 @@ var getCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start list command

@ -198,6 +233,9 @@ var listCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// end service Credentials
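
One detail worth calling out in this hunk: the removed code parsed complex positional arguments with fmt.Sscan directly into request fields such as AwsCredentials. Sscan only handles built-in scalar types and types implementing fmt.Scanner, so for a plain struct it fails at runtime, which is presumably why the commit routes complex request bodies through --json instead. A small sketch of the failure mode (the awsCredentials struct is hypothetical):

package main

import "fmt"

type awsCredentials struct {
    RoleArn string
}

func main() {
    // Scanning into a built-in scalar works fine.
    var n int
    if _, err := fmt.Sscan("42", &n); err == nil {
        fmt.Println("scanned int:", n)
    }

    // Scanning into a plain struct does not: fmt has no idea how to
    // populate it and returns an error instead.
    var creds awsCredentials
    if _, err := fmt.Sscan(`{"role_arn": "arn:aws:iam::123:role/x"}`, &creds); err != nil {
        fmt.Println("scan into struct failed:", err)
    }
}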

@ -22,6 +22,9 @@ var Cmd = &cobra.Command{
  **Note:** You can only add/use the OAuth custom application integrations when
  OAuth enrollment status is enabled. For more details see
  :method:OAuthEnrollment/create`,
    Annotations: map[string]string{
        "package": "oauth2",
    },
}

// start create command

@ -46,21 +49,21 @@ var createCmd = &cobra.Command{

  Create Custom OAuth App Integration.

  You can retrieve the custom oauth app integration via :method:get.`,
  You can retrieve the custom oauth app integration via
  :method:CustomAppIntegration/get.`,

    Annotations: map[string]string{},
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = createJson.Unmarshal(&createReq)
        if err != nil {
            return err
        }
        createReq.Name = args[0]
        _, err = fmt.Sscan(args[1], &createReq.RedirectUrls)
        if err != nil {
            return fmt.Errorf("invalid REDIRECT_URLS: %s", args[1])
        if cmd.Flags().Changed("json") {
            err = createJson.Unmarshal(&createReq)
            if err != nil {
                return err
            }
        } else {
            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
        }

        response, err := a.CustomAppIntegration.Create(ctx, createReq)

@ -69,15 +72,20 @@ var createCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start delete command

var deleteReq oauth2.DeleteCustomAppIntegrationRequest
var deleteJson flags.JsonFlag

func init() {
    Cmd.AddCommand(deleteCmd)
    // TODO: short flags
    deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -87,15 +95,28 @@ var deleteCmd = &cobra.Command{
    Long: `Delete Custom OAuth App Integration.

  Delete an existing Custom OAuth App Integration. You can retrieve the custom
  oauth app integration via :method:get.`,
  oauth app integration via :method:CustomAppIntegration/get.`,

    Annotations: map[string]string{},
    Args: cobra.ExactArgs(1),
    PreRunE: root.MustAccountClient,
    Args: func(cmd *cobra.Command, args []string) error {
        check := cobra.ExactArgs(1)
        if cmd.Flags().Changed("json") {
            check = cobra.ExactArgs(0)
        }
        return check(cmd, args)
    },
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        deleteReq.IntegrationId = args[0]
        if cmd.Flags().Changed("json") {
            err = deleteJson.Unmarshal(&deleteReq)
            if err != nil {
                return err
            }
        } else {
            deleteReq.IntegrationId = args[0]
        }

        err = a.CustomAppIntegration.Delete(ctx, deleteReq)
        if err != nil {

@ -103,15 +124,20 @@ var deleteCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start get command

var getReq oauth2.GetCustomAppIntegrationRequest
var getJson flags.JsonFlag

func init() {
    Cmd.AddCommand(getCmd)
    // TODO: short flags
    getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -123,12 +149,25 @@ var getCmd = &cobra.Command{
  Gets the Custom OAuth App Integration for the given integration id.`,

    Annotations: map[string]string{},
    Args: cobra.ExactArgs(1),
    PreRunE: root.MustAccountClient,
    Args: func(cmd *cobra.Command, args []string) error {
        check := cobra.ExactArgs(1)
        if cmd.Flags().Changed("json") {
            check = cobra.ExactArgs(0)
        }
        return check(cmd, args)
    },
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        getReq.IntegrationId = args[0]
        if cmd.Flags().Changed("json") {
            err = getJson.Unmarshal(&getReq)
            if err != nil {
                return err
            }
        } else {
            getReq.IntegrationId = args[0]
        }

        response, err := a.CustomAppIntegration.Get(ctx, getReq)
        if err != nil {

@ -136,6 +175,9 @@ var getCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start list command

@ -151,7 +193,7 @@ var listCmd = &cobra.Command{
    Long: `Get custom oauth app integrations.

  Get the list of custom oauth app integrations for the specified Databricks
  Account`,
  account`,

    Annotations: map[string]string{},
    PreRunE: root.MustAccountClient,

@ -164,6 +206,9 @@ var listCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start update command

@ -182,23 +227,33 @@ func init() {
}

var updateCmd = &cobra.Command{
    Use: "update",
    Use: "update INTEGRATION_ID",
    Short: `Updates Custom OAuth App Integration.`,
    Long: `Updates Custom OAuth App Integration.

  Updates an existing custom OAuth App Integration. You can retrieve the custom
  oauth app integration via :method:get.`,
  oauth app integration via :method:CustomAppIntegration/get.`,

    Annotations: map[string]string{},
    PreRunE: root.MustAccountClient,
    Args: func(cmd *cobra.Command, args []string) error {
        check := cobra.ExactArgs(1)
        if cmd.Flags().Changed("json") {
            check = cobra.ExactArgs(0)
        }
        return check(cmd, args)
    },
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = updateJson.Unmarshal(&updateReq)
        if err != nil {
            return err
        if cmd.Flags().Changed("json") {
            err = updateJson.Unmarshal(&updateReq)
            if err != nil {
                return err
            }
        } else {
            updateReq.IntegrationId = args[0]
        }
        updateReq.IntegrationId = args[0]

        err = a.CustomAppIntegration.Update(ctx, updateReq)
        if err != nil {

@ -206,6 +261,9 @@ var updateCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// end service CustomAppIntegration

@ -31,6 +31,9 @@ var Cmd = &cobra.Command{
  encryption requires that the workspace is on the E2 version of the platform.
  If you have an older workspace, it might not be on the E2 version of the
  platform. If you are not sure, contact your Databricks representative.`,
    Annotations: map[string]string{
        "package": "provisioning",
    },
}

// start create command

@ -43,6 +46,9 @@ func init() {
    // TODO: short flags
    createCmd.Flags().Var(&createJson, "json", `either inline JSON string or @path/to/file.json with request body`)

    // TODO: complex arg: aws_key_info
    // TODO: complex arg: gcp_key_info

}

var createCmd = &cobra.Command{

@ -61,7 +67,8 @@ var createCmd = &cobra.Command{
  EBS volume data.

  **Important**: Customer-managed keys are supported only for some deployment
  types, subscription types, and AWS regions.
  types, subscription types, and AWS regions that currently support creation of
  Databricks workspaces.

  This operation is available only if your account is on the E2 version of the
  platform or on a select custom plan that allows multiple workspaces per

@ -72,17 +79,13 @@ var createCmd = &cobra.Command{
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        err = createJson.Unmarshal(&createReq)
        if err != nil {
            return err
        }
        _, err = fmt.Sscan(args[0], &createReq.AwsKeyInfo)
        if err != nil {
            return fmt.Errorf("invalid AWS_KEY_INFO: %s", args[0])
        }
        _, err = fmt.Sscan(args[1], &createReq.UseCases)
        if err != nil {
            return fmt.Errorf("invalid USE_CASES: %s", args[1])
        if cmd.Flags().Changed("json") {
            err = createJson.Unmarshal(&createReq)
            if err != nil {
                return err
            }
        } else {
            return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
        }

        response, err := a.EncryptionKeys.Create(ctx, createReq)

@ -91,15 +94,20 @@ var createCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start delete command

var deleteReq provisioning.DeleteEncryptionKeyRequest
var deleteJson flags.JsonFlag

func init() {
    Cmd.AddCommand(deleteCmd)
    // TODO: short flags
    deleteCmd.Flags().Var(&deleteJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -112,25 +120,25 @@ var deleteCmd = &cobra.Command{
  delete a configuration that is associated with a running workspace.`,

    Annotations: map[string]string{},
    PreRunE: root.MustAccountClient,
    Args: func(cmd *cobra.Command, args []string) error {
        check := cobra.ExactArgs(1)
        if cmd.Flags().Changed("json") {
            check = cobra.ExactArgs(0)
        }
        return check(cmd, args)
    },
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.EncryptionKeys.CustomerManagedKeyAwsKeyInfoKeyArnToCustomerManagedKeyIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = deleteJson.Unmarshal(&deleteReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Databricks encryption key configuration ID")
            if err != nil {
                return err
            }
            args = append(args, id)
        } else {
            deleteReq.CustomerManagedKeyId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have databricks encryption key configuration id")
        }
        deleteReq.CustomerManagedKeyId = args[0]

        err = a.EncryptionKeys.Delete(ctx, deleteReq)
        if err != nil {

@ -138,15 +146,20 @@ var deleteCmd = &cobra.Command{
        }
        return nil
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start get command

var getReq provisioning.GetEncryptionKeyRequest
var getJson flags.JsonFlag

func init() {
    Cmd.AddCommand(getCmd)
    // TODO: short flags
    getCmd.Flags().Var(&getJson, "json", `either inline JSON string or @path/to/file.json with request body`)

}

@ -169,28 +182,28 @@ var getCmd = &cobra.Command{
  types, subscription types, and AWS regions.

  This operation is available only if your account is on the E2 version of the
  platform.`,
  platform.",`,

    Annotations: map[string]string{},
    PreRunE: root.MustAccountClient,
    Args: func(cmd *cobra.Command, args []string) error {
        check := cobra.ExactArgs(1)
        if cmd.Flags().Changed("json") {
            check = cobra.ExactArgs(0)
        }
        return check(cmd, args)
    },
    PreRunE: root.MustAccountClient,
    RunE: func(cmd *cobra.Command, args []string) (err error) {
        ctx := cmd.Context()
        a := root.AccountClient(ctx)
        if len(args) == 0 {
            names, err := a.EncryptionKeys.CustomerManagedKeyAwsKeyInfoKeyArnToCustomerManagedKeyIdMap(ctx)
        if cmd.Flags().Changed("json") {
            err = getJson.Unmarshal(&getReq)
            if err != nil {
                return err
            }
            id, err := cmdio.Select(ctx, names, "Databricks encryption key configuration ID")
            if err != nil {
                return err
            }
            args = append(args, id)
        } else {
            getReq.CustomerManagedKeyId = args[0]
        }
        if len(args) != 1 {
            return fmt.Errorf("expected to have databricks encryption key configuration id")
        }
        getReq.CustomerManagedKeyId = args[0]

        response, err := a.EncryptionKeys.Get(ctx, getReq)
        if err != nil {

@ -198,6 +211,9 @@ var getCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// start list command

@ -237,6 +253,9 @@ var listCmd = &cobra.Command{
        }
        return cmdio.Render(ctx, response)
    },
    // Disable completions since they are not applicable.
    // Can be overridden by manual implementation in `override.go`.
    ValidArgsFunction: cobra.NoFileCompletions,
}

// end service EncryptionKeys
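
Every generated command sets ValidArgsFunction to cobra.NoFileCompletions, with a comment pointing at override.go for manual implementations. A sketch of what such an override could look like, suggesting known IDs for the first positional argument; applyCompletionOverride and the sample IDs are hypothetical:

package main

import "github.com/spf13/cobra"

// applyCompletionOverride swaps in a completion function that offers a
// fixed list of IDs instead of cobra.NoFileCompletions.
func applyCompletionOverride(cmd *cobra.Command, knownIDs []string) {
    cmd.ValidArgsFunction = func(c *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
        if len(args) != 0 {
            return nil, cobra.ShellCompDirectiveNoFileComp
        }
        return knownIDs, cobra.ShellCompDirectiveNoFileComp
    }
}

func main() {
    cmd := &cobra.Command{
        Use: "get KEY_CONFIGURATION_ID",
        Run: func(*cobra.Command, []string) {},
    }
    applyCompletionOverride(cmd, []string{"key-123", "key-456"})
    _ = cmd.Execute()
}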

@ -0,0 +1,42 @@
package account

import "github.com/spf13/cobra"

// Groups returns an ordered list of command groups.
// The order matches the order used in the Databricks API explorer.
func Groups() []cobra.Group {
    return []cobra.Group{
        {
            ID: "iam",
            Title: "Identity and Access Management",
        },
        {
            ID: "catalog",
            Title: "Unity Catalog",
        },
        {
            ID: "settings",
            Title: "Settings",
        },
        {
            ID: "provisioning",
            Title: "Provisioning",
        },
        {
            ID: "billing",
            Title: "Billing",
        },
        {
            ID: "oauth2",
            Title: "OAuth",
        },
    }
}

func init() {
    // Register groups with parent command
    groups := Groups()
    for i := range groups {
        accountCmd.AddGroup(&groups[i])
    }
}
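
This new groups.go is the other half of the GroupID assignments made in account.go above: a subcommand may only reference a group ID that has been registered on its parent with AddGroup, and the registration order controls the order of the sections in help output. A minimal sketch of the same wiring, with hypothetical names:

package main

import "github.com/spf13/cobra"

func main() {
    root := &cobra.Command{Use: "acme"}

    // Register the group before wiring subcommands to it; cobra rejects a
    // GroupID that was never added to the parent command.
    root.AddGroup(&cobra.Group{ID: "billing", Title: "Billing"})

    usage := &cobra.Command{
        Use:   "usage",
        Short: "Show usage",
        Run:   func(*cobra.Command, []string) {},
    }
    usage.GroupID = "billing"
    root.AddCommand(usage)

    _ = root.Execute()
}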