mirror of https://github.com/databricks/cli.git
Commit 80c66a6f20

@@ -6,5 +6,8 @@
  "batch": {
    ".codegen/cmds-workspace.go.tmpl": "cmd/workspace/cmd.go",
    ".codegen/cmds-account.go.tmpl": "cmd/account/cmd.go"
  },
  "toolchain": {
    "required": ["go"]
  }
}

@@ -0,0 +1 @@
09a7fa63d9ae243e5407941f200960ca14d48b07

@@ -0,0 +1,40 @@
# Version changelog

## {{.Version}}

CLI:
{{- range .Changes}}
* {{.}}.
{{- end}}

Bundles:
* **FILL THIS IN MANUALLY BY MOVING RELEVANT ITEMS FROM ABOVE LIST**

Internal:
* **FILL THIS IN MANUALLY BY MOVING RELEVANT ITEMS FROM ABOVE LIST**

{{ if .ApiChanges -}}
API Changes:
{{- range .ApiChanges}}{{if or (eq .X "method") (eq .X "service")}}
* {{.Action}} {{template "what" .}}{{if .Extra}} {{.Extra}}{{with .Other}} {{template "what" .}}{{end}}{{end}}.
{{- end}}{{- end}}

OpenAPI commit {{.Sha}} ({{.Changed}})
{{- end }}

{{- if .DependencyUpdates }}
Dependency updates:
{{- range .DependencyUpdates}}
* {{.}}.
{{- end -}}
{{end}}

## {{.PrevVersion}}

{{- define "what" -}}
{{if eq .X "service" -}}
`databricks {{if .Service.IsAccounts}}account {{end -}}{{(.Service.TrimPrefix "account").KebabName}}` command group
{{- else if eq .X "method" -}}
`databricks {{if .Method.Service.IsAccounts}}account {{end -}}{{(.Method.Service.TrimPrefix "account").KebabName}} {{.Method.KebabName}}` command
{{- end}}
{{- end -}}

@@ -11,20 +11,21 @@ import (
{{.SnakeName}} "github.com/databricks/cli/cmd/account/{{(.TrimPrefix "account").KebabName}}"{{end}}{{end}}{{end}}
)

var accountCmd = &cobra.Command{
Use: "account",
Short: `Databricks Account Commands`,
}

func init() {
root.RootCmd.AddCommand(accountCmd)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "account",
Short: `Databricks Account Commands`,
}

{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
accountCmd.AddCommand({{.SnakeName}}.Cmd)
cmd.AddCommand({{.SnakeName}}.New())
{{end}}{{end}}{{end}}

// Register commands with groups
{{range .Services}}{{if .IsAccounts}}{{if not (in $excludes .KebabName) -}}
{{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
{{end}}{{end}}{{end}}
// Register all groups with the parent command.
groups := Groups()
for i := range groups {
cmd.AddGroup(&groups[i])
}

return cmd
}

@@ -2,7 +2,15 @@

package workspace

{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
{{ $excludes :=
list
"command-execution"
"statement-execution"
"dbfs"
"dbsql-permissions"
"account-access-control-proxy"
"files"
}}

import (
"github.com/databricks/cli/cmd/root"

@@ -10,13 +18,12 @@ import (
{{.SnakeName}} "github.com/databricks/cli/cmd/workspace/{{.KebabName}}"{{end}}{{end}}{{end}}
)

func init() {
func All() []*cobra.Command {
var out []*cobra.Command

{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
root.RootCmd.AddCommand({{.SnakeName}}.Cmd)
out = append(out, {{.SnakeName}}.New())
{{end}}{{end}}{{end}}

// Register commands with groups
{{range .Services}}{{if not .IsAccounts}}{{if not (in $excludes .KebabName) -}}
{{.SnakeName}}.Cmd.GroupID = "{{ .Package.Name }}"
{{end}}{{end}}{{end}}
return out
}

|
@ -10,7 +10,15 @@ import (
|
|||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
{{ $excludes := list "command-execution" "statement-execution" "dbfs" "dbsql-permissions" "account-access-control-proxy" }}
|
||||
{{ $excludes :=
|
||||
list
|
||||
"command-execution"
|
||||
"statement-execution"
|
||||
"dbfs"
|
||||
"dbsql-permissions"
|
||||
"account-access-control-proxy"
|
||||
"files"
|
||||
}}
|
||||
|
||||
{{if not (in $excludes .KebabName) }}
|
||||
{{template "service" .}}
|
||||
|
@@ -19,20 +27,34 @@ import (
{{end}}

{{define "service"}}
var Cmd = &cobra.Command{
Use: "{{(.TrimPrefix "account").KebabName}}",
{{- if .Description }}
Short: `{{.Summary | without "`"}}`,
Long: `{{.Comment " " 80 | without "`"}}`,
{{- end }}
Annotations: map[string]string{
"package": "{{ .Package.Name }}",
},
{{- if .IsPrivatePreview }}
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)

// This service is being previewed; hide from help output.
Hidden: true,
{{- end }}
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "{{(.TrimPrefix "account").KebabName}}",
{{- if .Description }}
Short: `{{.Summary | without "`"}}`,
Long: `{{.Comment " " 80 | without "`"}}`,
{{- end }}
GroupID: "{{ .Package.Name }}",
Annotations: map[string]string{
"package": "{{ .Package.Name }}",
},
{{- if .IsPrivatePreview }}

// This service is being previewed; hide from help output.
Hidden: true,
{{- end }}
}

// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}

return cmd
}

{{- $serviceName := .KebabName -}}

@@ -44,26 +66,39 @@ var Cmd = &cobra.Command{
{{end}}
// start {{.KebabName}} command

{{- $useJsonForAllFields := or .IsJsonOnly (and .Request (or (not .Request.IsAllRequiredFieldsPrimitive) .Request.HasRequiredNonBodyField)) -}}
{{- $needJsonFlag := or $useJsonForAllFields (and .Request (not .Request.IsOnlyPrimitiveFields)) -}}
{{- if .Request}}
var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if $needJsonFlag}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
{{end}}
{{if .Wait}}var {{.CamelName}}SkipWait bool
var {{.CamelName}}Timeout time.Duration{{end}}
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var {{.CamelName}}Overrides []func(
*cobra.Command,
{{- if .Request }}
*{{.Service.Package.Name}}.{{.Request.PascalName}},
{{- end }}
)

func new{{.PascalName}}() *cobra.Command {
cmd := &cobra.Command{}

{{- $useJsonForAllFields := or .IsJsonOnly (and .Request (or (not .Request.IsAllRequiredFieldsPrimitive) .Request.HasRequiredNonBodyField)) -}}
{{- $needJsonFlag := or $useJsonForAllFields (and .Request (not .Request.IsOnlyPrimitiveFields)) -}}

{{- if .Request}}

var {{.CamelName}}Req {{.Service.Package.Name}}.{{.Request.PascalName}}
{{- if $needJsonFlag}}
var {{.CamelName}}Json flags.JsonFlag
{{- end}}
{{- end}}

{{if .Wait}}var {{.CamelName}}SkipWait bool
var {{.CamelName}}Timeout time.Duration{{end}}

func init() {
Cmd.AddCommand({{.CamelName}}Cmd)
{{if .Wait}}
{{.CamelName}}Cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
{{.CamelName}}Cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
cmd.Flags().BoolVar(&{{.CamelName}}SkipWait, "no-wait", {{.CamelName}}SkipWait, `do not wait to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
cmd.Flags().DurationVar(&{{.CamelName}}Timeout, "timeout", {{.Wait.Timeout}}*time.Minute, `maximum amount of time to reach {{range $i, $e := .Wait.Success}}{{if $i}} or {{end}}{{.Content}}{{end}} state`)
{{end -}}
{{if .Request}}// TODO: short flags
{{- if $needJsonFlag}}
{{.CamelName}}Cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().Var(&{{.CamelName}}Json, "json", `either inline JSON string or @path/to/file.json with request body`)
{{- end}}
{{$method := .}}
{{ if not .IsJsonOnly }}

@@ -74,38 +109,39 @@ func init() {
{{else if .Entity.ArrayValue }}// TODO: array: {{.Name}}
{{else if .Entity.MapValue }}// TODO: map via StringToStringVar: {{.Name}}
{{else if .Entity.IsEmpty }}// TODO: output-only field
{{else if .Entity.Enum }}{{$method.CamelName}}Cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`)
{{else}}{{$method.CamelName}}Cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{else if .Entity.Enum }}cmd.Flags().Var(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", `{{.Summary | without "`"}}`)
{{else}}cmd.Flags().{{template "arg-type" .Entity}}(&{{$method.CamelName}}Req.{{.PascalName}}, "{{.KebabName}}", {{$method.CamelName}}Req.{{.PascalName}}, `{{.Summary | without "`"}}`)
{{end}}
{{- end -}}
{{- end}}
{{- end}}
{{end}}
}
{{- $excludeFromPrompts := list "workspace get-status" -}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

{{ $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}
var {{.CamelName}}Cmd = &cobra.Command{
Use: "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}",
{{- $excludeFromPrompts := list "workspace get-status" -}}
{{- $fullCommandName := (print $serviceName " " .KebabName) -}}
{{- $noPrompt := or .IsCrudCreate (in $excludeFromPrompts $fullCommandName) }}

{{- $hasPosArgs := and .Request (or .Request.IsAllRequiredFieldsPrimitive (eq .PascalName "RunNow")) -}}
{{- $hasSinglePosArg := and $hasPosArgs (eq 1 (len .Request.RequiredFields)) -}}
{{- $serviceHasNamedIdMap := and (and .Service.List .Service.List.NamedIdMap) (not (eq .PascalName "List")) -}}
{{- $hasIdPrompt := and (not $noPrompt) (and $hasSinglePosArg $serviceHasNamedIdMap) -}}
{{- $wait := and .Wait (and (not .IsCrudRead) (not (eq .SnakeName "get_run"))) -}}
{{- $hasRequiredArgs := and (not $hasIdPrompt) $hasPosArgs -}}

cmd.Use = "{{.KebabName}}{{if $hasPosArgs}}{{range .Request.RequiredFields}} {{.ConstantName}}{{end}}{{end}}"
{{- if .Description }}
Short: `{{.Summary | without "`"}}`,
Long: `{{.Comment " " 80 | without "`"}}`,
cmd.Short = `{{.Summary | without "`"}}`
cmd.Long = `{{.Comment " " 80 | without "`"}}`
{{- end }}
{{- if .IsPrivatePreview }}

// This command is being previewed; hide from help output.
Hidden: true,
cmd.Hidden = true
{{- end }}

Annotations: map[string]string{},{{if $hasRequiredArgs }}
Args: func(cmd *cobra.Command, args []string) error {
cmd.Annotations = make(map[string]string)
{{if $hasRequiredArgs }}
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := cobra.ExactArgs({{len .Request.RequiredFields}})
{{- if $useJsonForAllFields }}
if cmd.Flags().Changed("json") {

@@ -113,9 +149,10 @@ var {{.CamelName}}Cmd = &cobra.Command{
}
{{- end }}
return check(cmd, args)
},{{end}}
PreRunE: root.Must{{if .Service.IsAccounts}}Account{{else}}Workspace{{end}}Client,
RunE: func(cmd *cobra.Command, args []string) (err error) {
}
{{end}}
cmd.PreRunE = root.Must{{if .Service.IsAccounts}}Account{{else}}Workspace{{end}}Client
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
{{if .Service.IsAccounts}}a := root.AccountClient(ctx){{else}}w := root.WorkspaceClient(ctx){{end}}
{{- if .Request }}

@@ -204,10 +241,24 @@ var {{.CamelName}}Cmd = &cobra.Command{
{{- else -}}
{{template "method-call" .}}
{{end -}}
},
}

// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
ValidArgsFunction: cobra.NoFileCompletions,
cmd.ValidArgsFunction = cobra.NoFileCompletions

// Apply optional overrides to this command.
for _, fn := range {{.CamelName}}Overrides {
fn(cmd{{if .Request}}, &{{.CamelName}}Req{{end}})
}

return cmd
}

func init() {
cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
cmd.AddCommand(new{{.PascalName}}())
})
}
{{end}}
// end service {{.Name}}{{end}}

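The template changes above are the heart of this commit: generated commands move from package-level `Cmd` globals registered via `init()` to `New()` constructors with override hooks. A minimal sketch of how a manually curated file in a generated service directory could use those hooks (the package name and alias are hypothetical; only the `cmdOverrides` slice and its element signature come from the template above):

// overrides.go, a hand-written companion file in a generated service directory (hypothetical sketch).
package clusters

import "github.com/spf13/cobra"

func init() {
	// Registered at package init, before New() runs; New() applies each
	// function to the freshly constructed command, so customizations no
	// longer mutate package-level globals.
	cmdOverrides = append(cmdOverrides, func(cmd *cobra.Command) {
		cmd.Aliases = append(cmd.Aliases, "cluster") // hypothetical alias
	})
}
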
@@ -10,6 +10,7 @@ cmd/account/ip-access-lists/ip-access-lists.go linguist-generated=true
cmd/account/log-delivery/log-delivery.go linguist-generated=true
cmd/account/metastore-assignments/metastore-assignments.go linguist-generated=true
cmd/account/metastores/metastores.go linguist-generated=true
cmd/account/network-policy/network-policy.go linguist-generated=true
cmd/account/networks/networks.go linguist-generated=true
cmd/account/o-auth-enrollment/o-auth-enrollment.go linguist-generated=true
cmd/account/private-access/private-access.go linguist-generated=true

@@ -24,12 +25,15 @@ cmd/account/vpc-endpoints/vpc-endpoints.go linguist-generated=true
cmd/account/workspace-assignment/workspace-assignment.go linguist-generated=true
cmd/account/workspaces/workspaces.go linguist-generated=true
cmd/workspace/alerts/alerts.go linguist-generated=true
cmd/workspace/artifact-allowlists/artifact-allowlists.go linguist-generated=true
cmd/workspace/catalogs/catalogs.go linguist-generated=true
cmd/workspace/clean-rooms/clean-rooms.go linguist-generated=true
cmd/workspace/cluster-policies/cluster-policies.go linguist-generated=true
cmd/workspace/clusters/clusters.go linguist-generated=true
cmd/workspace/cmd.go linguist-generated=true
cmd/workspace/connections/connections.go linguist-generated=true
cmd/workspace/current-user/current-user.go linguist-generated=true
cmd/workspace/dashboard-widgets/dashboard-widgets.go linguist-generated=true
cmd/workspace/dashboards/dashboards.go linguist-generated=true
cmd/workspace/data-sources/data-sources.go linguist-generated=true
cmd/workspace/experiments/experiments.go linguist-generated=true

@@ -46,14 +50,17 @@ cmd/workspace/jobs/jobs.go linguist-generated=true
cmd/workspace/libraries/libraries.go linguist-generated=true
cmd/workspace/metastores/metastores.go linguist-generated=true
cmd/workspace/model-registry/model-registry.go linguist-generated=true
cmd/workspace/model-versions/model-versions.go linguist-generated=true
cmd/workspace/permissions/permissions.go linguist-generated=true
cmd/workspace/pipelines/pipelines.go linguist-generated=true
cmd/workspace/policy-families/policy-families.go linguist-generated=true
cmd/workspace/providers/providers.go linguist-generated=true
cmd/workspace/queries/queries.go linguist-generated=true
cmd/workspace/query-history/query-history.go linguist-generated=true
cmd/workspace/query-visualizations/query-visualizations.go linguist-generated=true
cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
cmd/workspace/recipients/recipients.go linguist-generated=true
cmd/workspace/registered-models/registered-models.go linguist-generated=true
cmd/workspace/repos/repos.go linguist-generated=true
cmd/workspace/schemas/schemas.go linguist-generated=true
cmd/workspace/secrets/secrets.go linguist-generated=true

@@ -1,29 +0,0 @@
name: publish-latest

on:
  workflow_dispatch:

  workflow_call:

jobs:
  publish:
    runs-on: ubuntu-22.04

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        with:
          ref: release-s3

      - name: Install s3cmd
        run: |
          sudo apt-get update
          sudo apt-get install s3cmd

      - name: Publish to S3
        working-directory: ./scripts
        run: ./publish_to_s3.sh
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
          AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}

@@ -3,8 +3,16 @@ name: build
on:
  pull_request:
    types: [opened, synchronize]
  merge_group:
    types: [checks_requested]
  push:
    branches: [main]
    # Always run on push to main. The build cache can only be reused
    # if it was saved by a run from the repository's default branch.
    # The run result will be identical to that from the merge queue
    # because the commit is identical, yet we need to perform it to
    # seed the build cache.
    branches:
      - main

jobs:
  tests:

@@ -20,16 +28,15 @@ jobs:

    steps:
      - name: Checkout repository and submodules
        uses: actions/checkout@v3
        uses: actions/checkout@v4

      - name: Unshallow
        run: git fetch --prune --unshallow

      - name: Setup Go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.5
          cache: true
          go-version: 1.21.0

      - name: Set go env
        run: |

@@ -39,7 +46,9 @@ jobs:
          go install honnef.co/go/tools/cmd/staticcheck@latest

      - name: Pull external libraries
        run: make vendor
        run: |
          make vendor
          pip3 install wheel

      - name: Run tests
        run: make test

@@ -49,14 +58,12 @@ jobs:

    steps:
      - name: Checkout
        uses: actions/checkout@v3
        uses: actions/checkout@v4

      - name: Setup Go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v4
        with:
          # Use 1.19 because of godoc formatting.
          # See https://tip.golang.org/doc/go1.19#go-doc.
          go-version: 1.19
          go-version: 1.21.0

          # No need to download cached dependencies when running gofmt.
          cache: false

@@ -13,32 +13,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository and submodules
        uses: actions/checkout@v3
        uses: actions/checkout@v4

      - name: Unshallow
        run: git fetch --prune --unshallow

      - name: Setup Go
        id: go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.5

      - name: Locate cache paths
        id: cache
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT

      # Note: use custom caching because below performs a cross platform build
      # through goreleaser and don't want to share a cache with the test builds.
      - name: Setup caching
        uses: actions/cache@v3
        with:
          path: |
            ${{ steps.cache.outputs.GOMODCACHE }}
            ${{ steps.cache.outputs.GOCACHE }}
          key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }}
          go-version: 1.21.0

      - name: Hide snapshot tag to outsmart GoReleaser
        run: git tag -d snapshot || true

@@ -12,32 +12,15 @@ jobs:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout repository and submodules
        uses: actions/checkout@v3
        uses: actions/checkout@v4

      - name: Unshallow
        run: git fetch --prune --unshallow

      - name: Setup Go
        id: go
        uses: actions/setup-go@v3
        uses: actions/setup-go@v4
        with:
          go-version: 1.19.5

      - name: Locate cache paths
        id: cache
        run: |
          echo "GOMODCACHE=$(go env GOMODCACHE)" >> $GITHUB_OUTPUT
          echo "GOCACHE=$(go env GOCACHE)" >> $GITHUB_OUTPUT

      # Note: use custom caching because below performs a cross platform build
      # through goreleaser and don't want to share a cache with the test builds.
      - name: Setup caching
        uses: actions/cache@v3
        with:
          path: |
            ${{ steps.cache.outputs.GOMODCACHE }}
            ${{ steps.cache.outputs.GOCACHE }}
          key: release-${{ runner.os }}-go-${{ steps.go.outputs.go-version }}-${{ hashFiles('go.sum', '.goreleaser.yaml') }}
          go-version: 1.21.0

      - name: Run GoReleaser
        uses: goreleaser/goreleaser-action@v4

@@ -46,8 +29,3 @@ jobs:
          args: release
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

  publish:
    uses: ./.github/workflows/publish-latest.yml
    needs: goreleaser
    secrets: inherit

@@ -28,3 +28,6 @@ __pycache__
.terraform.lock.hcl

.vscode/launch.json
.vscode/tasks.json

.databricks

@@ -0,0 +1,3 @@
# Typings for Pylance in VS Code
# see https://github.com/microsoft/pyright/blob/main/docs/builtins.md
from databricks.sdk.runtime import *

@@ -7,5 +7,8 @@
  "files.insertFinalNewline": true,
  "files.trimFinalNewlines": true,
  "python.envFile": "${workspaceFolder}/.databricks/.databricks.env",
  "databricks.python.envFile": "${workspaceFolder}/.env"
  "databricks.python.envFile": "${workspaceFolder}/.env",
  "python.analysis.stubPath": ".vscode",
  "jupyter.interactiveWindow.cellMarker.codeRegex": "^# COMMAND ----------|^# Databricks notebook source|^(#\\s*%%|#\\s*\\<codecell\\>|#\\s*In\\[\\d*?\\]|#\\s*In\\[ \\])",
  "jupyter.interactiveWindow.cellMarker.default": "# COMMAND ----------"
}

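The `cellMarker.codeRegex` setting added above controls which lines VS Code's interactive window treats as Databricks notebook cell boundaries. A quick standalone check of the same pattern (Go's regexp accepts this expression unchanged once the JSON string escaping is removed; the sample lines are illustrative):

package main

import (
	"fmt"
	"regexp"
)

func main() {
	// Pattern from "jupyter.interactiveWindow.cellMarker.codeRegex" above.
	re := regexp.MustCompile(`^# COMMAND ----------|^# Databricks notebook source|^(#\s*%%|#\s*\<codecell\>|#\s*In\[\d*?\]|#\s*In\[ \])`)
	for _, line := range []string{
		"# Databricks notebook source", // first line of an exported notebook
		"# COMMAND ----------",         // cell separator
		"print('hello')",               // ordinary code line
	} {
		fmt.Printf("%-30s matches: %v\n", line, re.MatchString(line))
	}
}
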
CHANGELOG.md

@@ -1,5 +1,294 @@
# Version changelog

## 0.205.0

This release marks the public preview phase of Databricks Asset Bundles.

For more information, please refer to our online documentation at
https://docs.databricks.com/en/dev-tools/bundles/.

CLI:
* Prompt once for a client profile ([#727](https://github.com/databricks/cli/pull/727)).

Bundles:
* Use clearer error message when no interpolation value is found. ([#764](https://github.com/databricks/cli/pull/764)).
* Use interactive prompt to select resource to run if not specified ([#762](https://github.com/databricks/cli/pull/762)).
* Add documentation link to bundle command group description ([#770](https://github.com/databricks/cli/pull/770)).

## 0.204.1

Bundles:
* Fix conversion of job parameters ([#744](https://github.com/databricks/cli/pull/744)).
* Add schema and config validation to jsonschema package ([#740](https://github.com/databricks/cli/pull/740)).
* Support Model Serving Endpoints in bundles ([#682](https://github.com/databricks/cli/pull/682)).
* Do not include empty output in job run output ([#749](https://github.com/databricks/cli/pull/749)).
* Fixed marking libraries from DBFS as remote ([#750](https://github.com/databricks/cli/pull/750)).
* Process only Python wheel tasks which have local libraries used ([#751](https://github.com/databricks/cli/pull/751)).
* Add enum support for bundle templates ([#668](https://github.com/databricks/cli/pull/668)).
* Apply Python wheel trampoline if workspace library is used ([#755](https://github.com/databricks/cli/pull/755)).
* List available targets when incorrect target passed ([#756](https://github.com/databricks/cli/pull/756)).
* Make bundle and sync fields optional ([#757](https://github.com/databricks/cli/pull/757)).
* Consolidate environment variable interaction ([#747](https://github.com/databricks/cli/pull/747)).

Internal:
* Update Go SDK to v0.19.1 ([#759](https://github.com/databricks/cli/pull/759)).

## 0.204.0

This release includes permission related commands for a subset of workspace
services where they apply. These complement the `permissions` command and
do not require specification of the object type to work with, as that is
implied by the command they are nested under.

CLI:
* Group permission related commands ([#730](https://github.com/databricks/cli/pull/730)).

Bundles:
* Fixed artifact file uploading on Windows and wheel execution on DBR 13.3 ([#722](https://github.com/databricks/cli/pull/722)).
* Make resource and artifact paths in bundle config relative to config folder ([#708](https://github.com/databricks/cli/pull/708)).
* Add support for ordering of input prompts ([#662](https://github.com/databricks/cli/pull/662)).
* Fix IsServicePrincipal() only working for workspace admins ([#732](https://github.com/databricks/cli/pull/732)).
* databricks bundle init template v1 ([#686](https://github.com/databricks/cli/pull/686)).
* databricks bundle init template v2: optional stubs, DLT support ([#700](https://github.com/databricks/cli/pull/700)).
* Show 'databricks bundle init' template in CLI prompt ([#725](https://github.com/databricks/cli/pull/725)).
* Include in set of environment variables to pass along. ([#736](https://github.com/databricks/cli/pull/736)).

Internal:
* Update Go SDK to v0.19.0 ([#729](https://github.com/databricks/cli/pull/729)).
* Replace API call to test configuration with dummy authenticate call ([#728](https://github.com/databricks/cli/pull/728)).

API Changes:
* Changed `databricks account storage-credentials create` command to return .
* Changed `databricks account storage-credentials get` command to return .
* Changed `databricks account storage-credentials list` command to return .
* Changed `databricks account storage-credentials update` command to return .
* Changed `databricks connections create` command with new required argument order.
* Changed `databricks connections update` command with new required argument order.
* Changed `databricks volumes create` command with new required argument order.
* Added `databricks artifact-allowlists` command group.
* Added `databricks model-versions` command group.
* Added `databricks registered-models` command group.
* Added `databricks cluster-policies get-permission-levels` command.
* Added `databricks cluster-policies get-permissions` command.
* Added `databricks cluster-policies set-permissions` command.
* Added `databricks cluster-policies update-permissions` command.
* Added `databricks clusters get-permission-levels` command.
* Added `databricks clusters get-permissions` command.
* Added `databricks clusters set-permissions` command.
* Added `databricks clusters update-permissions` command.
* Added `databricks instance-pools get-permission-levels` command.
* Added `databricks instance-pools get-permissions` command.
* Added `databricks instance-pools set-permissions` command.
* Added `databricks instance-pools update-permissions` command.
* Added `databricks files` command group.
* Changed `databricks permissions set` command to start returning .
* Changed `databricks permissions update` command to start returning .
* Added `databricks users get-permission-levels` command.
* Added `databricks users get-permissions` command.
* Added `databricks users set-permissions` command.
* Added `databricks users update-permissions` command.
* Added `databricks jobs get-permission-levels` command.
* Added `databricks jobs get-permissions` command.
* Added `databricks jobs set-permissions` command.
* Added `databricks jobs update-permissions` command.
* Changed `databricks experiments get-by-name` command to return .
* Changed `databricks experiments get-experiment` command to return .
* Added `databricks experiments delete-runs` command.
* Added `databricks experiments get-permission-levels` command.
* Added `databricks experiments get-permissions` command.
* Added `databricks experiments restore-runs` command.
* Added `databricks experiments set-permissions` command.
* Added `databricks experiments update-permissions` command.
* Added `databricks model-registry get-permission-levels` command.
* Added `databricks model-registry get-permissions` command.
* Added `databricks model-registry set-permissions` command.
* Added `databricks model-registry update-permissions` command.
* Added `databricks pipelines get-permission-levels` command.
* Added `databricks pipelines get-permissions` command.
* Added `databricks pipelines set-permissions` command.
* Added `databricks pipelines update-permissions` command.
* Added `databricks serving-endpoints get-permission-levels` command.
* Added `databricks serving-endpoints get-permissions` command.
* Added `databricks serving-endpoints set-permissions` command.
* Added `databricks serving-endpoints update-permissions` command.
* Added `databricks token-management get-permission-levels` command.
* Added `databricks token-management get-permissions` command.
* Added `databricks token-management set-permissions` command.
* Added `databricks token-management update-permissions` command.
* Changed `databricks dashboards create` command with new required argument order.
* Added `databricks warehouses get-permission-levels` command.
* Added `databricks warehouses get-permissions` command.
* Added `databricks warehouses set-permissions` command.
* Added `databricks warehouses update-permissions` command.
* Added `databricks dashboard-widgets` command group.
* Added `databricks query-visualizations` command group.
* Added `databricks repos get-permission-levels` command.
* Added `databricks repos get-permissions` command.
* Added `databricks repos set-permissions` command.
* Added `databricks repos update-permissions` command.
* Added `databricks secrets get-secret` command.
* Added `databricks workspace get-permission-levels` command.
* Added `databricks workspace get-permissions` command.
* Added `databricks workspace set-permissions` command.
* Added `databricks workspace update-permissions` command.

OpenAPI commit 09a7fa63d9ae243e5407941f200960ca14d48b07 (2023-09-04)

## 0.203.3

Bundles:
* Support cluster overrides with cluster_key and compute_key ([#696](https://github.com/databricks/cli/pull/696)).
* Allow referencing local Python wheels without artifacts section defined ([#703](https://github.com/databricks/cli/pull/703)).
* Fixed --environment flag ([#705](https://github.com/databricks/cli/pull/705)).
* Correctly identify local paths in libraries section ([#702](https://github.com/databricks/cli/pull/702)).
* Fixed path joining in FindFilesWithSuffixInPath ([#704](https://github.com/databricks/cli/pull/704)).
* Added transformation mutator for Python wheel task for them to work on DBR <13.1 ([#635](https://github.com/databricks/cli/pull/635)).

Internal:
* Add a foundation for built-in templates ([#685](https://github.com/databricks/cli/pull/685)).
* Test transform when no Python wheel tasks defined ([#714](https://github.com/databricks/cli/pull/714)).
* Pin Terraform binary version to 1.5.5 ([#715](https://github.com/databricks/cli/pull/715)).
* Cleanup after "Add a foundation for built-in templates" ([#707](https://github.com/databricks/cli/pull/707)).
* Filter down to Python wheel tasks only for trampoline ([#712](https://github.com/databricks/cli/pull/712)).
* Update Terraform provider schema structs from 1.23.0 ([#713](https://github.com/databricks/cli/pull/713)).

## 0.203.2

CLI:
* Added `databricks account o-auth-enrollment enable` command ([#687](https://github.com/databricks/cli/pull/687)).

Bundles:
* Do not try auto detect Python package if no Python wheel tasks defined ([#674](https://github.com/databricks/cli/pull/674)).
* Renamed `environments` to `targets` in bundle configuration ([#670](https://github.com/databricks/cli/pull/670)).
* Rename init project-dir flag to output-dir ([#676](https://github.com/databricks/cli/pull/676)).
* Added support for sync.include and sync.exclude sections ([#671](https://github.com/databricks/cli/pull/671)).
* Add template directory flag for bundle templates ([#675](https://github.com/databricks/cli/pull/675)).
* Never ignore root directory when enumerating files in a repository ([#683](https://github.com/databricks/cli/pull/683)).
* Improve 'mode' error message ([#681](https://github.com/databricks/cli/pull/681)).
* Added run_as section for bundle configuration ([#692](https://github.com/databricks/cli/pull/692)).

## 0.203.1

CLI:
* Always resolve .databrickscfg file ([#659](https://github.com/databricks/cli/pull/659)).

Bundles:
* Add internal tag for bundle fields to be skipped from schema ([#636](https://github.com/databricks/cli/pull/636)).
* Log the bundle root configuration file if applicable ([#657](https://github.com/databricks/cli/pull/657)).
* Execute paths without the .tmpl extension as templates ([#654](https://github.com/databricks/cli/pull/654)).
* Enable environment overrides for job clusters ([#658](https://github.com/databricks/cli/pull/658)).
* Merge artifacts and resources block with overrides enabled ([#660](https://github.com/databricks/cli/pull/660)).
* Locked terraform binary version to <= 1.5.5 ([#666](https://github.com/databricks/cli/pull/666)).
* Return better error messages for invalid JSON schema types in templates ([#661](https://github.com/databricks/cli/pull/661)).
* Use custom prompter for bundle template inputs ([#663](https://github.com/databricks/cli/pull/663)).
* Add map and pair helper functions for bundle templates ([#665](https://github.com/databricks/cli/pull/665)).
* Correct name for force acquire deploy flag ([#656](https://github.com/databricks/cli/pull/656)).
* Confirm that override with a zero value doesn't work ([#669](https://github.com/databricks/cli/pull/669)).

Internal:
* Consolidate functions in libs/git ([#652](https://github.com/databricks/cli/pull/652)).
* Upgraded Go version to 1.21 ([#664](https://github.com/databricks/cli/pull/664)).

## 0.203.0

CLI:
* Infer host from profile during `auth login` ([#629](https://github.com/databricks/cli/pull/629)).

Bundles:
* Extend deployment mode support ([#577](https://github.com/databricks/cli/pull/577)).
* Add validation for Git settings in bundles ([#578](https://github.com/databricks/cli/pull/578)).
* Only treat files with .tmpl extension as templates ([#594](https://github.com/databricks/cli/pull/594)).
* Add JSON schema validation for input template parameters ([#598](https://github.com/databricks/cli/pull/598)).
* Add DATABRICKS_BUNDLE_INCLUDE_PATHS to specify include paths through env vars ([#591](https://github.com/databricks/cli/pull/591)).
* Initialise an empty default bundle if BUNDLE_ROOT and DATABRICKS_BUNDLE_INCLUDES env vars are present ([#604](https://github.com/databricks/cli/pull/604)).
* Regenerate bundle resource structs from latest Terraform provider ([#633](https://github.com/databricks/cli/pull/633)).
* Fixed processing jobs libraries with remote path ([#638](https://github.com/databricks/cli/pull/638)).
* Add unit test for file name execution during rendering ([#640](https://github.com/databricks/cli/pull/640)).
* Add bundle init command and support for prompting user for input values ([#631](https://github.com/databricks/cli/pull/631)).
* Fix bundle git branch validation ([#645](https://github.com/databricks/cli/pull/645)).

Internal:
* Fix mkdir integration test on GCP ([#620](https://github.com/databricks/cli/pull/620)).
* Fix git clone integration test for non-existing repo ([#610](https://github.com/databricks/cli/pull/610)).
* Remove push to main trigger for build workflow ([#621](https://github.com/databricks/cli/pull/621)).
* Remove workflow to publish binaries to S3 ([#622](https://github.com/databricks/cli/pull/622)).
* Fix failing fs mkdir test on azure ([#627](https://github.com/databricks/cli/pull/627)).
* Print y/n options when displaying prompts using cmdio.Ask ([#650](https://github.com/databricks/cli/pull/650)).

API Changes:
* Changed `databricks account metastore-assignments create` command to not return anything.
* Added `databricks account network-policy` command group.

OpenAPI commit 7b57ba3a53f4de3d049b6a24391fe5474212daf8 (2023-07-28)

Dependency updates:
* Bump OpenAPI specification & Go SDK Version ([#624](https://github.com/databricks/cli/pull/624)).
* Bump golang.org/x/term from 0.10.0 to 0.11.0 ([#643](https://github.com/databricks/cli/pull/643)).
* Bump golang.org/x/text from 0.11.0 to 0.12.0 ([#642](https://github.com/databricks/cli/pull/642)).
* Bump golang.org/x/oauth2 from 0.10.0 to 0.11.0 ([#641](https://github.com/databricks/cli/pull/641)).

## 0.202.0

Breaking Change:
* Require include glob patterns to be explicitly defined ([#602](https://github.com/databricks/cli/pull/602)).

Bundles:
* Add support for more SDK config options ([#587](https://github.com/databricks/cli/pull/587)).
* Add template renderer for Databricks templates ([#589](https://github.com/databricks/cli/pull/589)).
* Fix formatting in renderer.go ([#593](https://github.com/databricks/cli/pull/593)).
* Fixed python wheel test ([#608](https://github.com/databricks/cli/pull/608)).
* Auto detect Python wheel packages and infer build command ([#603](https://github.com/databricks/cli/pull/603)).
* Added support for artifacts building for bundles ([#583](https://github.com/databricks/cli/pull/583)).
* Add support for cloning repositories ([#544](https://github.com/databricks/cli/pull/544)).
* Add regexp compile helper function for templates ([#601](https://github.com/databricks/cli/pull/601)).
* Add unit test that raw strings are printed as is ([#599](https://github.com/databricks/cli/pull/599)).

Internal:
* Fix tests under ./cmd/configure if DATABRICKS_TOKEN is set ([#605](https://github.com/databricks/cli/pull/605)).
* Remove dependency on global state in generated commands ([#595](https://github.com/databricks/cli/pull/595)).
* Remove dependency on global state for the root command ([#606](https://github.com/databricks/cli/pull/606)).
* Add merge_group trigger for build ([#612](https://github.com/databricks/cli/pull/612)).
* Added support for build command chaining and error on missing wheel ([#607](https://github.com/databricks/cli/pull/607)).
* Add TestAcc prefix to filer test and fix any failing tests ([#611](https://github.com/databricks/cli/pull/611)).
* Add url parse helper function for templates ([#600](https://github.com/databricks/cli/pull/600)).
* Remove dependency on global state for remaining commands ([#613](https://github.com/databricks/cli/pull/613)).
* Update CHANGELOG template ([#588](https://github.com/databricks/cli/pull/588)).

## 0.201.0

CLI:
* Support tab completion for profiles ([#572](https://github.com/databricks/cli/pull/572)).
* Improve auth login experience ([#570](https://github.com/databricks/cli/pull/570)).
* Integrate with auto-release infra ([#581](https://github.com/databricks/cli/pull/581)).

Bundles:
* Add development runs ([#522](https://github.com/databricks/cli/pull/522)).
* Correctly use --profile flag passed for all bundle commands ([#571](https://github.com/databricks/cli/pull/571)).
* Disallow notebooks in paths where files are expected ([#573](https://github.com/databricks/cli/pull/573)).
* Remove base path checks during sync ([#576](https://github.com/databricks/cli/pull/576)).
* First look for databricks.yml before falling back to bundle.yml ([#580](https://github.com/databricks/cli/pull/580)).

API Changes:
* Removed `databricks metastores maintenance` command.
* Added `databricks metastores enable-optimization` command.
* Added `databricks tables update` command.
* Changed `databricks account settings delete-personal-compute-setting` command with new required argument order.
* Changed `databricks account settings read-personal-compute-setting` command with new required argument order.
* Added `databricks clean-rooms` command group.

OpenAPI commit 850a075ed9758d21a6bc4409506b48c8b9f93ab4 (2023-07-18)

Dependency updates:
* Bump golang.org/x/term from 0.9.0 to 0.10.0 ([#567](https://github.com/databricks/cli/pull/567)).
* Bump golang.org/x/oauth2 from 0.9.0 to 0.10.0 ([#566](https://github.com/databricks/cli/pull/566)).
* Bump golang.org/x/mod from 0.11.0 to 0.12.0 ([#568](https://github.com/databricks/cli/pull/568)).
* Bump github.com/databricks/databricks-sdk-go from 0.12.0 to 0.13.0 ([#585](https://github.com/databricks/cli/pull/585)).

## 0.200.2

CLI:

@@ -4,9 +4,10 @@ import (
    "context"
    "fmt"

    "slices"

    "github.com/databricks/cli/bundle"
    "golang.org/x/exp/maps"
    "golang.org/x/exp/slices"
)

// all is an internal proxy for producing a list of mutators for all artifacts.

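This import swap follows the Go 1.21 upgrade recorded in the changelog above (#664): `slices` is now available from the standard library, while `maps` still comes from `golang.org/x/exp` at this point. A small illustrative sketch (the values are made up):

package main

import (
	"fmt"
	"slices" // standard library as of Go 1.21; previously golang.org/x/exp/slices
)

func main() {
	types := []string{"whl", "jar", "notebook"} // hypothetical artifact types
	fmt.Println(slices.Contains(types, "whl"))  // prints: true
}
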
@@ -0,0 +1,168 @@
package artifacts

import (
    "context"
    "crypto/sha256"
    "encoding/base64"
    "errors"
    "fmt"
    "os"
    "path"
    "path/filepath"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/artifacts/whl"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/cmdio"
    "github.com/databricks/databricks-sdk-go/service/workspace"
)

type mutatorFactory = func(name string) bundle.Mutator

var buildMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
    config.ArtifactPythonWheel: whl.Build,
}

var uploadMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{}

func getBuildMutator(t config.ArtifactType, name string) bundle.Mutator {
    mutatorFactory, ok := buildMutators[t]
    if !ok {
        mutatorFactory = BasicBuild
    }

    return mutatorFactory(name)
}

func getUploadMutator(t config.ArtifactType, name string) bundle.Mutator {
    mutatorFactory, ok := uploadMutators[t]
    if !ok {
        mutatorFactory = BasicUpload
    }

    return mutatorFactory(name)
}

// BasicBuild defines a general build mutator which builds an artifact based on artifact.BuildCommand.
type basicBuild struct {
    name string
}

func BasicBuild(name string) bundle.Mutator {
    return &basicBuild{name: name}
}

func (m *basicBuild) Name() string {
    return fmt.Sprintf("artifacts.Build(%s)", m.name)
}

func (m *basicBuild) Apply(ctx context.Context, b *bundle.Bundle) error {
    artifact, ok := b.Config.Artifacts[m.name]
    if !ok {
        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Building...", m.name))

    out, err := artifact.Build(ctx)
    if err != nil {
        return fmt.Errorf("artifacts.Build(%s): %w, output: %s", m.name, err, out)
    }
    cmdio.LogString(ctx, fmt.Sprintf("artifacts.Build(%s): Build succeeded", m.name))

    return nil
}

// BasicUpload defines a general upload mutator which uploads an artifact as a library to the workspace.
type basicUpload struct {
    name string
}

func BasicUpload(name string) bundle.Mutator {
    return &basicUpload{name: name}
}

func (m *basicUpload) Name() string {
    return fmt.Sprintf("artifacts.Upload(%s)", m.name)
}

func (m *basicUpload) Apply(ctx context.Context, b *bundle.Bundle) error {
    artifact, ok := b.Config.Artifacts[m.name]
    if !ok {
        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    if len(artifact.Files) == 0 {
        return fmt.Errorf("artifact source is not configured: %s", m.name)
    }

    err := uploadArtifact(ctx, artifact, b)
    if err != nil {
        return fmt.Errorf("artifacts.Upload(%s): %w", m.name, err)
    }

    return nil
}

func uploadArtifact(ctx context.Context, a *config.Artifact, b *bundle.Bundle) error {
    for i := range a.Files {
        f := &a.Files[i]
        if f.NeedsUpload() {
            filename := filepath.Base(f.Source)
            cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Uploading...", filename))
            remotePath, err := uploadArtifactFile(ctx, f.Source, b)
            if err != nil {
                return err
            }
            cmdio.LogString(ctx, fmt.Sprintf("artifacts.Upload(%s): Upload succeeded", filename))

            f.RemotePath = remotePath
        }
    }

    a.NormalisePaths()
    return nil
}

// uploadArtifactFile uploads an artifact file to the workspace.
func uploadArtifactFile(ctx context.Context, file string, b *bundle.Bundle) (string, error) {
    raw, err := os.ReadFile(file)
    if err != nil {
        return "", fmt.Errorf("unable to read %s: %w", file, errors.Unwrap(err))
    }

    uploadPath, err := getUploadBasePath(b)
    if err != nil {
        return "", err
    }

    fileHash := sha256.Sum256(raw)
    remotePath := path.Join(uploadPath, fmt.Sprintf("%x", fileHash), filepath.Base(file))
    // Make sure target directory exists.
    err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(remotePath))
    if err != nil {
        return "", fmt.Errorf("unable to create directory for %s: %w", remotePath, err)
    }

    // Import to workspace.
    err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
        Path:      remotePath,
        Overwrite: true,
        Format:    workspace.ImportFormatAuto,
        Content:   base64.StdEncoding.EncodeToString(raw),
    })
    if err != nil {
        return "", fmt.Errorf("unable to import %s: %w", remotePath, err)
    }

    return remotePath, nil
}

func getUploadBasePath(b *bundle.Bundle) (string, error) {
    artifactPath := b.Config.Workspace.ArtifactsPath
    if artifactPath == "" {
        return "", fmt.Errorf("remote artifact path not configured")
    }

    return path.Join(artifactPath, ".internal"), nil
}

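Note how `uploadArtifactFile` in the file above derives a content-addressed remote path: the SHA-256 of the file bytes becomes a directory component under `<workspace.artifactsPath>/.internal`, so identical content always lands at the same path. A self-contained sketch of just that path construction (the inputs are made up):

package main

import (
	"crypto/sha256"
	"fmt"
	"path"
	"path/filepath"
)

// remotePathFor mirrors the path logic of uploadArtifactFile above:
// <artifactsPath>/.internal/<sha256 hex of contents>/<file basename>.
func remotePathFor(artifactsPath, localFile string, contents []byte) string {
	hash := sha256.Sum256(contents)
	return path.Join(artifactsPath, ".internal", fmt.Sprintf("%x", hash), filepath.Base(localFile))
}

func main() {
	fmt.Println(remotePathFor("/Users/someone@example.com/artifacts", "dist/test.whl", []byte("wheel bytes")))
}
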
@@ -0,0 +1,123 @@
package artifacts

import (
    "context"
    "os"
    "path/filepath"
    "regexp"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/databricks-sdk-go/service/compute"
    "github.com/databricks/databricks-sdk-go/service/workspace"
    "github.com/stretchr/testify/require"
)

func touchEmptyFile(t *testing.T, path string) {
    err := os.MkdirAll(filepath.Dir(path), 0700)
    require.NoError(t, err)
    f, err := os.Create(path)
    require.NoError(t, err)
    f.Close()
}

type MockWorkspaceService struct {
}

// Delete implements workspace.WorkspaceService.
func (MockWorkspaceService) Delete(ctx context.Context, request workspace.Delete) error {
    panic("unimplemented")
}

// Export implements workspace.WorkspaceService.
func (MockWorkspaceService) Export(ctx context.Context, request workspace.ExportRequest) (*workspace.ExportResponse, error) {
    panic("unimplemented")
}

// GetStatus implements workspace.WorkspaceService.
func (MockWorkspaceService) GetStatus(ctx context.Context, request workspace.GetStatusRequest) (*workspace.ObjectInfo, error) {
    panic("unimplemented")
}

// Import implements workspace.WorkspaceService.
func (MockWorkspaceService) Import(ctx context.Context, request workspace.Import) error {
    return nil
}

// List implements workspace.WorkspaceService.
func (MockWorkspaceService) List(ctx context.Context, request workspace.ListWorkspaceRequest) (*workspace.ListResponse, error) {
    panic("unimplemented")
}

// Mkdirs implements workspace.WorkspaceService.
func (MockWorkspaceService) Mkdirs(ctx context.Context, request workspace.Mkdirs) error {
    return nil
}

// GetPermissionLevels implements workspace.WorkspaceService.
func (MockWorkspaceService) GetPermissionLevels(
    ctx context.Context,
    request workspace.GetWorkspaceObjectPermissionLevelsRequest,
) (*workspace.GetWorkspaceObjectPermissionLevelsResponse, error) {
    panic("unimplemented")
}

// GetPermissions implements workspace.WorkspaceService.
func (MockWorkspaceService) GetPermissions(
    ctx context.Context,
    request workspace.GetWorkspaceObjectPermissionsRequest,
) (*workspace.WorkspaceObjectPermissions, error) {
    panic("unimplemented")
}

// SetPermissions implements workspace.WorkspaceService.
func (MockWorkspaceService) SetPermissions(
    ctx context.Context,
    request workspace.WorkspaceObjectPermissionsRequest,
) (*workspace.WorkspaceObjectPermissions, error) {
    panic("unimplemented")
}

// UpdatePermissions implements workspace.WorkspaceService.
func (MockWorkspaceService) UpdatePermissions(
    ctx context.Context,
    request workspace.WorkspaceObjectPermissionsRequest,
) (*workspace.WorkspaceObjectPermissions, error) {
    panic("unimplemented")
}

func TestUploadArtifactFileToCorrectRemotePath(t *testing.T) {
    dir := t.TempDir()
    whlPath := filepath.Join(dir, "dist", "test.whl")
    touchEmptyFile(t, whlPath)
    b := &bundle.Bundle{
        Config: config.Root{
            Path: dir,
            Bundle: config.Bundle{
                Target: "whatever",
            },
            Workspace: config.Workspace{
                ArtifactsPath: "/Users/test@databricks.com/whatever",
            },
        },
    }

    b.WorkspaceClient().Workspace.WithImpl(MockWorkspaceService{})
    artifact := &config.Artifact{
        Type: "whl",
        Files: []config.ArtifactFile{
            {
                Source: whlPath,
                Libraries: []*compute.Library{
                    {Whl: "dist\\test.whl"},
                },
            },
        },
    }

    err := uploadArtifact(context.Background(), artifact, b)
    require.NoError(t, err)
    require.Regexp(t, regexp.MustCompile("/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].RemotePath)
    require.Regexp(t, regexp.MustCompile("/Workspace/Users/test@databricks.com/whatever/.internal/[a-z0-9]+/test.whl"), artifact.Files[0].Libraries[0].Whl)
}

@@ -0,0 +1,33 @@
package artifacts

import (
    "context"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/artifacts/whl"
    "github.com/databricks/cli/libs/log"
)

func DetectPackages() bundle.Mutator {
    return &autodetect{}
}

type autodetect struct {
}

func (m *autodetect) Name() string {
    return "artifacts.DetectPackages"
}

func (m *autodetect) Apply(ctx context.Context, b *bundle.Bundle) error {
    // If the artifacts section is explicitly defined, do not try to auto detect packages.
    if b.Config.Artifacts != nil {
        log.Debugf(ctx, "artifacts block is defined, skipping auto-detecting")
        return nil
    }

    return bundle.Apply(ctx, b, bundle.Seq(
        whl.DetectPackage(),
        whl.DefineArtifactsFromLibraries(),
    ))
}

@@ -3,9 +3,9 @@ package artifacts
import (
    "context"
    "fmt"
    "path/filepath"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/artifacts/notebook"
)

func BuildAll() bundle.Mutator {

@@ -33,9 +33,24 @@ func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    if artifact.Notebook != nil {
        return bundle.Apply(ctx, b, notebook.Build(m.name))
    // Skip building if the build command is not specified or inferred.
    if artifact.BuildCommand == "" {
        // If no build command was specified or inferred and no artifact
        // output files are specified, the artifact is misconfigured.
        if len(artifact.Files) == 0 {
            return fmt.Errorf("misconfigured artifact: please specify 'build' or 'files' property")
        }
        return nil
    }

    return nil
    // If the artifact path is not provided, use the bundle root dir.
    if artifact.Path == "" {
        artifact.Path = b.Config.Path
    }

    if !filepath.IsAbs(artifact.Path) {
        artifact.Path = filepath.Join(b.Config.Path, artifact.Path)
    }

    return bundle.Apply(ctx, b, getBuildMutator(artifact.Type, m.name))
}

@@ -0,0 +1,64 @@
package artifacts

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/artifacts/whl"
    "github.com/databricks/cli/bundle/config"
)

var inferMutators map[config.ArtifactType]mutatorFactory = map[config.ArtifactType]mutatorFactory{
    config.ArtifactPythonWheel: whl.InferBuildCommand,
}

func getInferMutator(t config.ArtifactType, name string) bundle.Mutator {
    mutatorFactory, ok := inferMutators[t]
    if !ok {
        return nil
    }

    return mutatorFactory(name)
}

func InferMissingProperties() bundle.Mutator {
    return &all{
        name: "infer",
        fn:   inferArtifactByName,
    }
}

func inferArtifactByName(name string) (bundle.Mutator, error) {
    return &infer{name}, nil
}

type infer struct {
    name string
}

func (m *infer) Name() string {
    return fmt.Sprintf("artifacts.Infer(%s)", m.name)
}

func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
    artifact, ok := b.Config.Artifacts[m.name]
    if !ok {
        return fmt.Errorf("artifact doesn't exist: %s", m.name)
    }

    // Only try to infer the build command if it's not already defined
    // and there are no files explicitly defined, which would mean the
    // package is built manually by the customer outside of bundle cycles.
    if artifact.BuildCommand != "" || len(artifact.Files) > 0 {
        return nil
    }

    inferMutator := getInferMutator(artifact.Type, m.name)
    if inferMutator != nil {
        return bundle.Apply(ctx, b, inferMutator)
    }

    return nil
}

@ -1,81 +0,0 @@
package notebook

import (
	"context"
	"errors"
	"fmt"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/workspace"
)

type build struct {
	name string
}

func Build(name string) bundle.Mutator {
	return &build{
		name: name,
	}
}

func (m *build) Name() string {
	return fmt.Sprintf("notebook.Build(%s)", m.name)
}

func (m *build) Apply(_ context.Context, b *bundle.Bundle) error {
	a, ok := b.Config.Artifacts[m.name]
	if !ok {
		return fmt.Errorf("artifact doesn't exist: %s", m.name)
	}

	artifact := a.Notebook

	// Check if the filetype is supported.
	switch ext := strings.ToLower(filepath.Ext(artifact.Path)); ext {
	case ".py":
		artifact.Language = workspace.LanguagePython
	case ".scala":
		artifact.Language = workspace.LanguageScala
	case ".sql":
		artifact.Language = workspace.LanguageSql
	default:
		return fmt.Errorf("invalid notebook extension: %s", ext)
	}

	// Open underlying file.
	f, err := os.Open(filepath.Join(b.Config.Path, artifact.Path))
	if err != nil {
		return fmt.Errorf("unable to open artifact file %s: %w", artifact.Path, errors.Unwrap(err))
	}
	defer f.Close()

	// Check that the file contains the notebook marker on its first line.
	ok, err = hasMarker(artifact.Language, f)
	if err != nil {
		return fmt.Errorf("unable to read artifact file %s: %s", artifact.Path, errors.Unwrap(err))
	}
	if !ok {
		return fmt.Errorf("notebook marker not found in %s", artifact.Path)
	}

	// Check that an artifact path is defined.
	remotePath := b.Config.Workspace.ArtifactsPath
	if remotePath == "" {
		return fmt.Errorf("remote artifact path not configured")
	}

	// Store absolute paths.
	artifact.LocalPath = filepath.Join(b.Config.Path, artifact.Path)
	artifact.RemotePath = path.Join(remotePath, stripExtension(artifact.Path))
	return nil
}

func stripExtension(path string) string {
	ext := filepath.Ext(path)
	return path[0 : len(path)-len(ext)]
}
@ -1,29 +0,0 @@
package notebook

import (
	"bufio"
	"io"
	"strings"

	"github.com/databricks/databricks-sdk-go/service/workspace"
)

func hasMarker(l workspace.Language, r io.Reader) (bool, error) {
	scanner := bufio.NewScanner(r)
	ok := scanner.Scan()
	if !ok {
		return false, scanner.Err()
	}

	line := strings.TrimSpace(scanner.Text())
	switch l {
	case workspace.LanguagePython:
		return line == "# Databricks notebook source", nil
	case workspace.LanguageScala:
		return line == "// Databricks notebook source", nil
	case workspace.LanguageSql:
		return line == "-- Databricks notebook source", nil
	default:
		panic("language not handled: " + l)
	}
}
@ -1,60 +0,0 @@
package notebook

import (
	"context"
	"encoding/base64"
	"errors"
	"fmt"
	"os"
	"path"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/workspace"
)

type upload struct {
	name string
}

func Upload(name string) bundle.Mutator {
	return &upload{
		name: name,
	}
}

func (m *upload) Name() string {
	return fmt.Sprintf("notebook.Upload(%s)", m.name)
}

func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
	a, ok := b.Config.Artifacts[m.name]
	if !ok {
		return fmt.Errorf("artifact doesn't exist: %s", m.name)
	}

	artifact := a.Notebook
	raw, err := os.ReadFile(artifact.LocalPath)
	if err != nil {
		return fmt.Errorf("unable to read %s: %w", m.name, errors.Unwrap(err))
	}

	// Make sure target directory exists.
	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, path.Dir(artifact.RemotePath))
	if err != nil {
		return fmt.Errorf("unable to create directory for %s: %w", m.name, err)
	}

	// Import to workspace.
	err = b.WorkspaceClient().Workspace.Import(ctx, workspace.Import{
		Path:      artifact.RemotePath,
		Overwrite: true,
		Format:    workspace.ImportFormatSource,
		Language:  artifact.Language,
		Content:   base64.StdEncoding.EncodeToString(raw),
	})
	if err != nil {
		return fmt.Errorf("unable to import %s: %w", m.name, err)
	}

	return nil
}
@ -5,7 +5,7 @@ import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/artifacts/notebook"
	"github.com/databricks/databricks-sdk-go/service/workspace"
)

func UploadAll() bundle.Mutator {

@ -15,6 +15,10 @@ func UploadAll() bundle.Mutator {
	}
}

func CleanUp() bundle.Mutator {
	return &cleanUp{}
}

type upload struct {
	name string
}

@ -33,8 +37,33 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) error {
		return fmt.Errorf("artifact doesn't exist: %s", m.name)
	}

	if artifact.Notebook != nil {
		return bundle.Apply(ctx, b, notebook.Upload(m.name))
	if len(artifact.Files) == 0 {
		return fmt.Errorf("artifact source is not configured: %s", m.name)
	}

	return bundle.Apply(ctx, b, getUploadMutator(artifact.Type, m.name))
}

type cleanUp struct{}

func (m *cleanUp) Name() string {
	return "artifacts.CleanUp"
}

func (m *cleanUp) Apply(ctx context.Context, b *bundle.Bundle) error {
	uploadPath, err := getUploadBasePath(b)
	if err != nil {
		return err
	}

	b.WorkspaceClient().Workspace.Delete(ctx, workspace.Delete{
		Path:      uploadPath,
		Recursive: true,
	})

	err = b.WorkspaceClient().Workspace.MkdirsByPath(ctx, uploadPath)
	if err != nil {
		return fmt.Errorf("unable to create directory for %s: %w", uploadPath, err)
	}

	return nil
@ -0,0 +1,81 @@
package whl

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"regexp"
	"time"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/log"
)

type detectPkg struct {
}

func DetectPackage() bundle.Mutator {
	return &detectPkg{}
}

func (m *detectPkg) Name() string {
	return "artifacts.whl.AutoDetect"
}

func (m *detectPkg) Apply(ctx context.Context, b *bundle.Bundle) error {
	wheelTasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
	if len(wheelTasks) == 0 {
		log.Infof(ctx, "No local wheel tasks in databricks.yml config, skipping auto detect")
		return nil
	}
	cmdio.LogString(ctx, "artifacts.whl.AutoDetect: Detecting Python wheel project...")

	// Check if there is a setup.py at the bundle root.
	setupPy := filepath.Join(b.Config.Path, "setup.py")
	_, err := os.Stat(setupPy)
	if err != nil {
		cmdio.LogString(ctx, "artifacts.whl.AutoDetect: No Python wheel project found at bundle root folder")
		return nil
	}

	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.AutoDetect: Found Python wheel project at %s", b.Config.Path))
	module := extractModuleName(setupPy)

	if b.Config.Artifacts == nil {
		b.Config.Artifacts = make(map[string]*config.Artifact)
	}

	pkgPath, err := filepath.Abs(b.Config.Path)
	if err != nil {
		return err
	}
	b.Config.Artifacts[module] = &config.Artifact{
		Path: pkgPath,
		Type: config.ArtifactPythonWheel,
	}

	return nil
}

func extractModuleName(setupPy string) string {
	bytes, err := os.ReadFile(setupPy)
	if err != nil {
		return randomName()
	}

	content := string(bytes)
	r := regexp.MustCompile(`name=['"](.*)['"]`)
	matches := r.FindStringSubmatch(content)
	if len(matches) == 0 {
		return randomName()
	}
	return matches[1]
}

func randomName() string {
	return fmt.Sprintf("artifact%d", time.Now().Unix())
}
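As a quick aside (ours, not part of this change), the regex in extractModuleName can be exercised standalone: it captures the value of the name=... keyword argument from setup.py, and the timestamp-based fallback name kicks in when there is no match. Note the greedy .* means a line with several quoted arguments after name= could over-capture; the test fixtures below keep name= simple, so this works in practice.

	package main

	import (
		"fmt"
		"regexp"
	)

	func main() {
		r := regexp.MustCompile(`name=['"](.*)['"]`)
		for _, src := range []string{
			`setup(name="my_test_code")`, // matches: captures my_test_code
			`setup(version="0.0.1")`,     // no name= present: falls back
		} {
			if m := r.FindStringSubmatch(src); len(m) > 0 {
				fmt.Println("module:", m[1])
			} else {
				fmt.Println("module: <generated, e.g. artifact1690000000>")
			}
		}
	}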
@ -0,0 +1,22 @@
package whl

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestExtractModuleName(t *testing.T) {
	moduleName := extractModuleName("./testdata/setup.py")
	assert.Equal(t, "my_test_code", moduleName)
}

func TestExtractModuleNameMinimal(t *testing.T) {
	moduleName := extractModuleName("./testdata/setup_minimal.py")
	assert.Equal(t, "my_test_code", moduleName)
}

func TestExtractModuleNameIncorrect(t *testing.T) {
	moduleName := extractModuleName("./testdata/setup_incorrect.py")
	assert.Contains(t, moduleName, "artifact")
}
@ -0,0 +1,60 @@
package whl

import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/python"
)

type build struct {
	name string
}

func Build(name string) bundle.Mutator {
	return &build{
		name: name,
	}
}

func (m *build) Name() string {
	return fmt.Sprintf("artifacts.whl.Build(%s)", m.name)
}

func (m *build) Apply(ctx context.Context, b *bundle.Bundle) error {
	artifact, ok := b.Config.Artifacts[m.name]
	if !ok {
		return fmt.Errorf("artifact doesn't exist: %s", m.name)
	}

	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Building...", m.name))

	dir := artifact.Path

	distPath := filepath.Join(dir, "dist")
	os.RemoveAll(distPath)
	python.CleanupWheelFolder(dir)

	out, err := artifact.Build(ctx)
	if err != nil {
		return fmt.Errorf("artifacts.whl.Build(%s): Failed %w, output: %s", m.name, err, out)
	}
	cmdio.LogString(ctx, fmt.Sprintf("artifacts.whl.Build(%s): Build succeeded", m.name))

	wheels := python.FindFilesWithSuffixInPath(distPath, ".whl")
	if len(wheels) == 0 {
		return fmt.Errorf("artifacts.whl.Build(%s): cannot find built wheel in %s", m.name, dir)
	}
	for _, wheel := range wheels {
		artifact.Files = append(artifact.Files, config.ArtifactFile{
			Source: wheel,
		})
	}

	return nil
}
@ -0,0 +1,56 @@
package whl

import (
	"context"
	"path/filepath"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/libraries"
	"github.com/databricks/cli/libs/log"
)

type fromLibraries struct{}

func DefineArtifactsFromLibraries() bundle.Mutator {
	return &fromLibraries{}
}

func (m *fromLibraries) Name() string {
	return "artifacts.whl.DefineArtifactsFromLibraries"
}

func (*fromLibraries) Apply(ctx context.Context, b *bundle.Bundle) error {
	if len(b.Config.Artifacts) != 0 {
		log.Debugf(ctx, "Skipping defining artifacts from libraries because artifacts section is explicitly defined")
		return nil
	}

	tasks := libraries.FindAllWheelTasksWithLocalLibraries(b)
	for _, task := range tasks {
		for _, lib := range task.Libraries {
			matches, err := filepath.Glob(filepath.Join(b.Config.Path, lib.Whl))
			// The file referenced from the libraries section does not exist; skip it.
			if err != nil {
				continue
			}

			for _, match := range matches {
				name := filepath.Base(match)
				if b.Config.Artifacts == nil {
					b.Config.Artifacts = make(map[string]*config.Artifact)
				}

				log.Debugf(ctx, "Adding an artifact block for %s", match)
				b.Config.Artifacts[name] = &config.Artifact{
					Files: []config.ArtifactFile{
						{Source: match},
					},
					Type: config.ArtifactPythonWheel,
				}
			}
		}
	}

	return nil
}
@ -0,0 +1,34 @@
package whl

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/python"
)

type infer struct {
	name string
}

func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) error {
	artifact := b.Config.Artifacts[m.name]
	py, err := python.DetectExecutable(ctx)
	if err != nil {
		return err
	}
	artifact.BuildCommand = fmt.Sprintf("%s setup.py bdist_wheel", py)

	return nil
}

func (m *infer) Name() string {
	return fmt.Sprintf("artifacts.whl.Infer(%s)", m.name)
}

func InferBuildCommand(name string) bundle.Mutator {
	return &infer{
		name: name,
	}
}
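In practice this means that, assuming python.DetectExecutable resolves to e.g. python3 on the machine running the CLI, the artifact's build property is inferred as the single command "python3 setup.py bdist_wheel", which the generic build mutator above then executes from the artifact path.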
@ -0,0 +1,15 @@
from setuptools import setup, find_packages

import my_test_code

setup(
    name="my_test_code",
    version=my_test_code.__version__,
    author=my_test_code.__author__,
    url="https://databricks.com",
    author_email="john.doe@databricks.com",
    description="my test wheel",
    packages=find_packages(include=["my_test_code"]),
    entry_points={"group_1": "run=my_test_code.__main__:main"},
    install_requires=["setuptools"],
)

@ -0,0 +1,14 @@
from setuptools import setup, find_packages

import my_test_code

setup(
    version=my_test_code.__version__,
    author=my_test_code.__author__,
    url="https://databricks.com",
    author_email="john.doe@databricks.com",
    description="my test wheel",
    packages=find_packages(include=["my_test_code"]),
    entry_points={"group_1": "run=my_test_code.__main__:main"},
    install_requires=["setuptools"],
)

@ -0,0 +1,3 @@
from setuptools import setup

setup(name="my_test_code")
@ -7,21 +7,26 @@
package bundle

import (
	"context"
	"fmt"
	"os"
	"path/filepath"
	"sync"

	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/env"
	"github.com/databricks/cli/folders"
	"github.com/databricks/cli/libs/git"
	"github.com/databricks/cli/libs/locker"
	"github.com/databricks/cli/libs/log"
	"github.com/databricks/cli/libs/terraform"
	"github.com/databricks/databricks-sdk-go"
	sdkconfig "github.com/databricks/databricks-sdk-go/config"
	"github.com/hashicorp/terraform-exec/tfexec"
)

const internalFolder = ".internal"

type Bundle struct {
	Config config.Root

@ -43,9 +48,30 @@ type Bundle struct {
	AutoApprove bool
}

func Load(path string) (*Bundle, error) {
func Load(ctx context.Context, path string) (*Bundle, error) {
	bundle := &Bundle{}
	err := bundle.Config.Load(filepath.Join(path, config.FileName))
	stat, err := os.Stat(path)
	if err != nil {
		return nil, err
	}
	configFile, err := config.FileNames.FindInPath(path)
	if err != nil {
		_, hasRootEnv := env.Root(ctx)
		_, hasIncludesEnv := env.Includes(ctx)
		if hasRootEnv && hasIncludesEnv && stat.IsDir() {
			log.Debugf(ctx, "No bundle configuration; using bundle root: %s", path)
			bundle.Config = config.Root{
				Path: path,
				Bundle: config.Bundle{
					Name: filepath.Base(path),
				},
			}
			return bundle, nil
		}
		return nil, err
	}
	log.Debugf(ctx, "Loading bundle configuration from: %s", configFile)
	err = bundle.Config.Load(configFile)
	if err != nil {
		return nil, err
	}

@ -54,20 +80,20 @@ func Load(path string) (*Bundle, error) {

// MustLoad returns a bundle configuration.
// It returns an error if a bundle was not found or could not be loaded.
func MustLoad() (*Bundle, error) {
	root, err := mustGetRoot()
func MustLoad(ctx context.Context) (*Bundle, error) {
	root, err := mustGetRoot(ctx)
	if err != nil {
		return nil, err
	}

	return Load(root)
	return Load(ctx, root)
}

// TryLoad returns a bundle configuration if there is one, but doesn't fail if there isn't one.
// It returns an error if a bundle was found but could not be loaded.
// It returns a `nil` bundle if a bundle was not found.
func TryLoad() (*Bundle, error) {
	root, err := tryGetRoot()
func TryLoad(ctx context.Context) (*Bundle, error) {
	root, err := tryGetRoot(ctx)
	if err != nil {
		return nil, err
	}

@ -77,7 +103,7 @@ func TryLoad() (*Bundle, error) {
		return nil, nil
	}

	return Load(root)
	return Load(ctx, root)
}

func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {

@ -92,14 +118,13 @@ func (b *Bundle) WorkspaceClient() *databricks.WorkspaceClient {
}

// CacheDir returns directory to use for temporary files for this bundle.
// Scoped to the bundle's environment.
func (b *Bundle) CacheDir(paths ...string) (string, error) {
	if b.Config.Bundle.Environment == "" {
		panic("environment not set")
// Scoped to the bundle's target.
func (b *Bundle) CacheDir(ctx context.Context, paths ...string) (string, error) {
	if b.Config.Bundle.Target == "" {
		panic("target not set")
	}

	cacheDirName, exists := os.LookupEnv("DATABRICKS_BUNDLE_TMP")

	cacheDirName, exists := env.TempDir(ctx)
	if !exists || cacheDirName == "" {
		cacheDirName = filepath.Join(
			// Anchor at bundle root directory.

@ -113,8 +138,8 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
	// Fixed components of the result path.
	parts := []string{
		cacheDirName,
		// Scope with environment name.
		b.Config.Bundle.Environment,
		// Scope with target name.
		b.Config.Bundle.Target,
	}

	// Append dynamic components of the result path.

@ -130,6 +155,38 @@ func (b *Bundle) CacheDir(paths ...string) (string, error) {
	return dir, nil
}

// This directory is used to store and automatically sync internal bundle files,
// such as notebook trampoline files for Python wheels.
func (b *Bundle) InternalDir(ctx context.Context) (string, error) {
	cacheDir, err := b.CacheDir(ctx)
	if err != nil {
		return "", err
	}

	dir := filepath.Join(cacheDir, internalFolder)
	err = os.MkdirAll(dir, 0700)
	if err != nil {
		return dir, err
	}

	return dir, nil
}

// GetSyncIncludePatterns returns the list of user-defined includes and also
// adds the InternalDir folder to the include list for the sync command,
// so this folder is always synced.
func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
	internalDir, err := b.InternalDir(ctx)
	if err != nil {
		return nil, err
	}
	internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
	if err != nil {
		return nil, err
	}
	return append(b.Config.Sync.Include, filepath.ToSlash(filepath.Join(internalDirRel, "*.*"))), nil
}

func (b *Bundle) GitRepository() (*git.Repository, error) {
	rootPath, err := folders.FindDirWithLeaf(b.Config.Path, ".git")
	if err != nil {
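To make the layout concrete, here is a small standalone sketch (ours; the root path is an example value) of where CacheDir and InternalDir resolve for a bundle deploying to target "default" with no DATABRICKS_BUNDLE_TMP override, consistent with the cache-dir tests below:

	package main

	import (
		"fmt"
		"path/filepath"
	)

	func main() {
		root := "/home/user/proj" // bundle root (example)
		cache := filepath.Join(root, ".databricks", "bundle", "default")
		internal := filepath.Join(cache, ".internal")
		fmt.Println(cache)    // /home/user/proj/.databricks/bundle/default
		fmt.Println(internal) // /home/user/proj/.databricks/bundle/default/.internal
		// GetSyncIncludePatterns would add ".databricks/bundle/default/.internal/*.*"
		// (relative to the bundle root) to the user-defined sync includes.
	}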
@ -1,108 +1,112 @@
package bundle

import (
	"context"
	"os"
	"path/filepath"
	"testing"

	"github.com/databricks/cli/bundle/env"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestLoadNotExists(t *testing.T) {
	b, err := Load("/doesntexist")
	b, err := Load(context.Background(), "/doesntexist")
	assert.True(t, os.IsNotExist(err))
	assert.Nil(t, b)
}

func TestLoadExists(t *testing.T) {
	b, err := Load("./tests/basic")
	b, err := Load(context.Background(), "./tests/basic")
	require.Nil(t, err)
	assert.Equal(t, "basic", b.Config.Bundle.Name)
}

func TestBundleCacheDir(t *testing.T) {
	ctx := context.Background()
	projectDir := t.TempDir()
	f1, err := os.Create(filepath.Join(projectDir, "bundle.yml"))
	f1, err := os.Create(filepath.Join(projectDir, "databricks.yml"))
	require.NoError(t, err)
	f1.Close()

	bundle, err := Load(projectDir)
	bundle, err := Load(ctx, projectDir)
	require.NoError(t, err)

	// Artificially set environment.
	// This is otherwise done by [mutators.SelectEnvironment].
	bundle.Config.Bundle.Environment = "default"
	// Artificially set target.
	// This is otherwise done by [mutators.SelectTarget].
	bundle.Config.Bundle.Target = "default"

	// unset env variable in case it's set
	t.Setenv("DATABRICKS_BUNDLE_TMP", "")

	cacheDir, err := bundle.CacheDir()
	cacheDir, err := bundle.CacheDir(ctx)

	// format is <CWD>/.databricks/bundle/<environment>
	// format is <CWD>/.databricks/bundle/<target>
	assert.NoError(t, err)
	assert.Equal(t, filepath.Join(projectDir, ".databricks", "bundle", "default"), cacheDir)
}

func TestBundleCacheDirOverride(t *testing.T) {
	ctx := context.Background()
	projectDir := t.TempDir()
	bundleTmpDir := t.TempDir()
	f1, err := os.Create(filepath.Join(projectDir, "bundle.yml"))
	f1, err := os.Create(filepath.Join(projectDir, "databricks.yml"))
	require.NoError(t, err)
	f1.Close()

	bundle, err := Load(projectDir)
	bundle, err := Load(ctx, projectDir)
	require.NoError(t, err)

	// Artificially set environment.
	// This is otherwise done by [mutators.SelectEnvironment].
	bundle.Config.Bundle.Environment = "default"
	// Artificially set target.
	// This is otherwise done by [mutators.SelectTarget].
	bundle.Config.Bundle.Target = "default"

	// now we expect to use 'bundleTmpDir' instead of CWD/.databricks/bundle
	t.Setenv("DATABRICKS_BUNDLE_TMP", bundleTmpDir)

	cacheDir, err := bundle.CacheDir()
	cacheDir, err := bundle.CacheDir(ctx)

	// format is <DATABRICKS_BUNDLE_TMP>/<environment>
	// format is <DATABRICKS_BUNDLE_TMP>/<target>
	assert.NoError(t, err)
	assert.Equal(t, filepath.Join(bundleTmpDir, "default"), cacheDir)
}

func TestBundleMustLoadSuccess(t *testing.T) {
	t.Setenv(envBundleRoot, "./tests/basic")
	b, err := MustLoad()
	t.Setenv(env.RootVariable, "./tests/basic")
	b, err := MustLoad(context.Background())
	require.NoError(t, err)
	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
}

func TestBundleMustLoadFailureWithEnv(t *testing.T) {
	t.Setenv(envBundleRoot, "./tests/doesntexist")
	_, err := MustLoad()
	t.Setenv(env.RootVariable, "./tests/doesntexist")
	_, err := MustLoad(context.Background())
	require.Error(t, err, "not a directory")
}

func TestBundleMustLoadFailureIfNotFound(t *testing.T) {
	chdir(t, t.TempDir())
	_, err := MustLoad()
	_, err := MustLoad(context.Background())
	require.Error(t, err, "unable to find bundle root")
}

func TestBundleTryLoadSuccess(t *testing.T) {
	t.Setenv(envBundleRoot, "./tests/basic")
	b, err := TryLoad()
	t.Setenv(env.RootVariable, "./tests/basic")
	b, err := TryLoad(context.Background())
	require.NoError(t, err)
	assert.Equal(t, "tests/basic", filepath.ToSlash(b.Config.Path))
}

func TestBundleTryLoadFailureWithEnv(t *testing.T) {
	t.Setenv(envBundleRoot, "./tests/doesntexist")
	_, err := TryLoad()
	t.Setenv(env.RootVariable, "./tests/doesntexist")
	_, err := TryLoad(context.Background())
	require.Error(t, err, "not a directory")
}

func TestBundleTryLoadOkIfNotFound(t *testing.T) {
	chdir(t, t.TempDir())
	b, err := TryLoad()
	b, err := TryLoad(context.Background())
	assert.NoError(t, err)
	assert.Nil(t, b)
}
@ -1,20 +1,101 @@
package config

import "github.com/databricks/databricks-sdk-go/service/workspace"
import (
	"bytes"
	"context"
	"fmt"
	"os/exec"
	"path"
	"strings"

	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

type Artifacts map[string]*Artifact

func (artifacts Artifacts) SetConfigFilePath(path string) {
	for _, artifact := range artifacts {
		artifact.ConfigFilePath = path
	}
}

type ArtifactType string

const ArtifactPythonWheel ArtifactType = `whl`

type ArtifactFile struct {
	Source     string             `json:"source"`
	RemotePath string             `json:"-" bundle:"readonly"`
	Libraries  []*compute.Library `json:"-" bundle:"readonly"`
}

// Artifact defines a single local code artifact that can be
// built/uploaded/referenced in the context of this bundle.
type Artifact struct {
	Notebook *NotebookArtifact `json:"notebook,omitempty"`
}
	Type ArtifactType `json:"type"`

type NotebookArtifact struct {
	// The local path to the directory with the root of the artifact,
	// for example, where setup.py is located for Python projects.
	Path string `json:"path"`

	// Language is detected during build step.
	Language workspace.Language `json:"language,omitempty" bundle:"readonly"`
	// The relative or absolute path to the built artifact files
	// (Python wheel, Java jar, etc.) themselves.
	Files        []ArtifactFile `json:"files"`
	BuildCommand string         `json:"build"`

	// Paths are synthesized during build step.
	LocalPath  string `json:"local_path,omitempty" bundle:"readonly"`
	RemotePath string `json:"remote_path,omitempty" bundle:"readonly"`
	paths.Paths
}

func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
	if a.BuildCommand == "" {
		return nil, fmt.Errorf("no build property defined")
	}

	out := make([][]byte, 0)
	commands := strings.Split(a.BuildCommand, " && ")
	for _, command := range commands {
		buildParts := strings.Split(command, " ")
		cmd := exec.CommandContext(ctx, buildParts[0], buildParts[1:]...)
		cmd.Dir = a.Path
		res, err := cmd.CombinedOutput()
		if err != nil {
			return res, err
		}
		out = append(out, res)
	}
	return bytes.Join(out, []byte{}), nil
}

func (a *Artifact) NormalisePaths() {
	for _, f := range a.Files {
		// If no libraries are attached, there is nothing to normalise; skip.
		if f.Libraries == nil {
			continue
		}

		wsfsBase := "/Workspace"
		remotePath := path.Join(wsfsBase, f.RemotePath)
		for i := range f.Libraries {
			lib := f.Libraries[i]
			if lib.Whl != "" {
				lib.Whl = remotePath
				continue
			}
			if lib.Jar != "" {
				lib.Jar = remotePath
				continue
			}
		}

	}
}

// This function determines if artifact files need to be uploaded.
// During bundle processing we analyse which library uses which artifact file.
// If an artifact file is used as a library, we store a reference to that library
// in the artifact file's Libraries field.
// If an artifact file has libraries it's used in, then we need to upload the file.
// Otherwise the artifact file is unused and we skip the upload.
func (af *ArtifactFile) NeedsUpload() bool {
	return af.Libraries != nil
}
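A note on Build above: the build string is split on " && " into sequential commands, and each command on single spaces into argv, so quoted or multi-space arguments are not supported. A standalone sketch (ours) of the splitting:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		build := "python3 setup.py bdist_wheel && echo done"
		for _, command := range strings.Split(build, " && ") {
			parts := strings.Split(command, " ")
			// Artifact.Build would run exec.CommandContext(ctx, parts[0], parts[1:]...)
			fmt.Printf("exec %q args %q\n", parts[0], parts[1:])
		}
	}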
@ -15,7 +15,10 @@ type Bundle struct {
	// Default warehouse to run SQL on.
	// DefaultWarehouse string `json:"default_warehouse,omitempty"`

	// Environment is set by the mutator that selects the environment.
	// Target is set by the mutator that selects the target.
	Target string `json:"target,omitempty" bundle:"readonly"`

	// DEPRECATED. Left for backward compatibility with Target.
	Environment string `json:"environment,omitempty" bundle:"readonly"`

	// Terraform holds configuration related to Terraform.

@ -25,14 +28,17 @@ type Bundle struct {
	// Lock configures locking behavior on deployment.
	Lock Lock `json:"lock" bundle:"readonly"`

	// Force-override Git branch validation.
	Force bool `json:"force" bundle:"readonly"`

	// Contains Git information like current commit, current branch and
	// origin URL. Automatically loaded by reading the .git directory if not specified.
	Git Git `json:"git,omitempty"`

	// Determines the mode of the environment.
	// Determines the mode of the target.
	// For example, 'mode: development' can be used for deployments for
	// development purposes.
	// Annotated readonly as this should be set at the environment level.
	// Annotated readonly as this should be set at the target level.
	Mode Mode `json:"mode,omitempty" bundle:"readonly"`

	// Overrides the compute used for jobs and other supported assets.
@ -1,38 +0,0 @@
package config

type Mode string

// Environment defines overrides for a single environment.
// This structure is recursively merged into the root configuration.
type Environment struct {
	// Default marks that this environment must be used if one isn't specified
	// by the user (through environment variable or command line argument).
	Default bool `json:"default,omitempty"`

	// Determines the mode of the environment.
	// For example, 'mode: development' can be used for deployments for
	// development purposes.
	Mode Mode `json:"mode,omitempty"`

	// Overrides the compute used for jobs and other supported assets.
	ComputeID string `json:"compute_id,omitempty"`

	Bundle *Bundle `json:"bundle,omitempty"`

	Workspace *Workspace `json:"workspace,omitempty"`

	Artifacts map[string]*Artifact `json:"artifacts,omitempty"`

	Resources *Resources `json:"resources,omitempty"`

	// Override default values for defined variables
	// Does not permit defining new variables or redefining existing ones
	// in the scope of an environment
	Variables map[string]string `json:"variables,omitempty"`
}

const (
	// Right now, we just have a default / "" mode and a "development" mode.
	// Additional modes are expected to come for pull-requests and production.
	Development Mode = "development"
)
@ -0,0 +1,18 @@
package config

type Experimental struct {
	Scripts map[ScriptHook]Command `json:"scripts,omitempty"`
}

type Command string
type ScriptHook string

// These hook names are currently experimental and subject to change.
const (
	ScriptPreInit    ScriptHook = "preinit"
	ScriptPostInit   ScriptHook = "postinit"
	ScriptPreBuild   ScriptHook = "prebuild"
	ScriptPostBuild  ScriptHook = "postbuild"
	ScriptPreDeploy  ScriptHook = "predeploy"
	ScriptPostDeploy ScriptHook = "postdeploy"
)
@ -4,4 +4,10 @@ type Git struct {
	Branch    string `json:"branch,omitempty"`
	OriginURL string `json:"origin_url,omitempty"`
	Commit    string `json:"commit,omitempty" bundle:"readonly"`

	// Inferred is set to true if the Git details were inferred and weren't set explicitly.
	Inferred bool `json:"-" bundle:"readonly"`

	// The actual branch according to Git (may be different from the configured branch).
	ActualBranch string `json:"-" bundle:"readonly"`
}
@ -9,10 +9,11 @@ import (
	"sort"
	"strings"

	"slices"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/variable"
	"golang.org/x/exp/maps"
	"golang.org/x/exp/slices"
)

const Delimiter = "."

@ -183,7 +184,7 @@ func (a *accumulator) Resolve(path string, seenPaths []string, fns ...LookupFunc
	// fetch the string node to resolve
	field, ok := a.strings[path]
	if !ok {
		return fmt.Errorf("could not resolve reference %s", path)
		return fmt.Errorf("no value found for interpolation reference: ${%s}", path)
	}

	// return early if the string field has no variables to interpolate

@ -247,5 +247,5 @@ func TestInterpolationInvalidVariableReference(t *testing.T) {
	}

	err := expand(&config)
	assert.ErrorContains(t, err, "could not resolve reference vars.foo")
	assert.ErrorContains(t, err, "no value found for interpolation reference: ${vars.foo}")
}
@ -3,9 +3,8 @@ package interpolation
import (
	"errors"
	"fmt"
	"slices"
	"strings"

	"golang.org/x/exp/slices"
)

// LookupFunction returns the value to rewrite a path expression to.
@ -1,37 +0,0 @@
package mutator

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

type defineDefaultEnvironment struct {
	name string
}

// DefineDefaultEnvironment adds an environment named "default"
// to the configuration if none have been defined.
func DefineDefaultEnvironment() bundle.Mutator {
	return &defineDefaultEnvironment{
		name: "default",
	}
}

func (m *defineDefaultEnvironment) Name() string {
	return fmt.Sprintf("DefineDefaultEnvironment(%s)", m.name)
}

func (m *defineDefaultEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
	// Nothing to do if the configuration has at least 1 environment.
	if len(b.Config.Environments) > 0 {
		return nil
	}

	// Define default environment.
	b.Config.Environments = make(map[string]*config.Environment)
	b.Config.Environments[m.name] = &config.Environment{}
	return nil
}
@ -1,36 +0,0 @@
package mutator

import (
	"context"

	"github.com/databricks/cli/bundle"
	"golang.org/x/exp/slices"
)

type defineDefaultInclude struct {
	include []string
}

// DefineDefaultInclude sets the list of includes to a default if it hasn't been set.
func DefineDefaultInclude() bundle.Mutator {
	return &defineDefaultInclude{
		// When we support globstar we can collapse below into a single line.
		include: []string{
			// Load YAML files in the same directory.
			"*.yml",
			// Load YAML files in subdirectories.
			"*/*.yml",
		},
	}
}

func (m *defineDefaultInclude) Name() string {
	return "DefineDefaultInclude"
}

func (m *defineDefaultInclude) Apply(_ context.Context, b *bundle.Bundle) error {
	if len(b.Config.Include) == 0 {
		b.Config.Include = slices.Clone(m.include)
	}
	return nil
}
@ -1,18 +0,0 @@
package mutator_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDefaultInclude(t *testing.T) {
	bundle := &bundle.Bundle{}
	err := mutator.DefineDefaultInclude().Apply(context.Background(), bundle)
	require.NoError(t, err)
	assert.Equal(t, []string{"*.yml", "*/*.yml"}, bundle.Config.Include)
}
@ -0,0 +1,37 @@
package mutator

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

type defineDefaultTarget struct {
	name string
}

// DefineDefaultTarget adds a target named "default"
// to the configuration if none have been defined.
func DefineDefaultTarget() bundle.Mutator {
	return &defineDefaultTarget{
		name: "default",
	}
}

func (m *defineDefaultTarget) Name() string {
	return fmt.Sprintf("DefineDefaultTarget(%s)", m.name)
}

func (m *defineDefaultTarget) Apply(_ context.Context, b *bundle.Bundle) error {
	// Nothing to do if the configuration has at least 1 target.
	if len(b.Config.Targets) > 0 {
		return nil
	}

	// Define default target.
	b.Config.Targets = make(map[string]*config.Target)
	b.Config.Targets[m.name] = &config.Target{}
	return nil
}
@ -11,25 +11,25 @@ import (
	"github.com/stretchr/testify/require"
)

func TestDefaultEnvironment(t *testing.T) {
func TestDefaultTarget(t *testing.T) {
	bundle := &bundle.Bundle{}
	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
	err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle)
	require.NoError(t, err)
	env, ok := bundle.Config.Environments["default"]
	env, ok := bundle.Config.Targets["default"]
	assert.True(t, ok)
	assert.Equal(t, &config.Environment{}, env)
	assert.Equal(t, &config.Target{}, env)
}

func TestDefaultEnvironmentAlreadySpecified(t *testing.T) {
func TestDefaultTargetAlreadySpecified(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Environments: map[string]*config.Environment{
			Targets: map[string]*config.Target{
				"development": {},
			},
		},
	}
	err := mutator.DefineDefaultEnvironment().Apply(context.Background(), bundle)
	err := mutator.DefineDefaultTarget().Apply(context.Background(), bundle)
	require.NoError(t, err)
	_, ok := bundle.Config.Environments["default"]
	_, ok := bundle.Config.Targets["default"]
	assert.False(t, ok)
}
@ -27,14 +27,14 @@ func (m *defineDefaultWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle
		return fmt.Errorf("unable to define default workspace root: bundle name not defined")
	}

	if b.Config.Bundle.Environment == "" {
		return fmt.Errorf("unable to define default workspace root: bundle environment not selected")
	if b.Config.Bundle.Target == "" {
		return fmt.Errorf("unable to define default workspace root: bundle target not selected")
	}

	b.Config.Workspace.RootPath = fmt.Sprintf(
		"~/.bundle/%s/%s",
		b.Config.Bundle.Name,
		b.Config.Bundle.Environment,
		b.Config.Bundle.Target,
	)
	return nil
}

@ -15,8 +15,8 @@ func TestDefaultWorkspaceRoot(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Name:        "name",
				Environment: "environment",
				Name:   "name",
				Target: "environment",
			},
		},
	}
@ -16,8 +16,10 @@ func TestExpandWorkspaceRoot(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				CurrentUser: &iam.User{
					UserName: "jane@doe.com",
				CurrentUser: &config.User{
					User: &iam.User{
						UserName: "jane@doe.com",
					},
				},
				RootPath: "~/foo",
			},

@ -32,8 +34,10 @@ func TestExpandWorkspaceRootDoesNothing(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				CurrentUser: &iam.User{
					UserName: "jane@doe.com",
				CurrentUser: &config.User{
					User: &iam.User{
						UserName: "jane@doe.com",
					},
				},
				RootPath: "/Users/charly@doe.com/foo",
			},

@ -48,8 +52,10 @@ func TestExpandWorkspaceRootWithoutRoot(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Workspace: config.Workspace{
				CurrentUser: &iam.User{
					UserName: "jane@doe.com",
				CurrentUser: &config.User{
					User: &iam.User{
						UserName: "jane@doe.com",
					},
				},
			},
		},
@ -24,15 +24,20 @@ func (m *loadGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
	if err != nil {
		return err
	}
	// load branch name if undefined
	if b.Config.Bundle.Git.Branch == "" {
		branch, err := repo.CurrentBranch()
		if err != nil {
			log.Warnf(ctx, "failed to load current branch: %s", err)
		} else {

	// Read branch name of current checkout
	branch, err := repo.CurrentBranch()
	if err == nil {
		b.Config.Bundle.Git.ActualBranch = branch
		if b.Config.Bundle.Git.Branch == "" {
			// Only load branch if there's no user defined value
			b.Config.Bundle.Git.Inferred = true
			b.Config.Bundle.Git.Branch = branch
		}
	} else {
		log.Warnf(ctx, "failed to load current branch: %s", err)
	}

	// load commit hash if undefined
	if b.Config.Bundle.Git.Commit == "" {
		commit, err := repo.LatestCommit()
@ -2,17 +2,19 @@ package mutator

import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/scripts"
)

func DefaultMutators() []bundle.Mutator {
	return []bundle.Mutator{
		DefineDefaultInclude(),
		scripts.Execute(config.ScriptPreInit),
		ProcessRootIncludes(),
		DefineDefaultEnvironment(),
		DefineDefaultTarget(),
		LoadGitDetails(),
	}
}

func DefaultMutatorsForEnvironment(env string) []bundle.Mutator {
	return append(DefaultMutators(), SelectEnvironment(env))
func DefaultMutatorsForTarget(env string) []bundle.Mutator {
	return append(DefaultMutators(), SelectTarget(env))
}
@ -3,11 +3,11 @@ package mutator
import (
	"context"
	"fmt"
	"os"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/cli/libs/env"
)

type overrideCompute struct{}

@ -23,10 +23,10 @@ func (m *overrideCompute) Name() string {
func overrideJobCompute(j *resources.Job, compute string) {
	for i := range j.Tasks {
		task := &j.Tasks[i]
		if task.NewCluster != nil {
		if task.NewCluster != nil || task.ExistingClusterId != "" || task.ComputeKey != "" || task.JobClusterKey != "" {
			task.NewCluster = nil
			task.ExistingClusterId = compute
		} else if task.ExistingClusterId != "" {
			task.JobClusterKey = ""
			task.ComputeKey = ""
			task.ExistingClusterId = compute
		}
	}

@ -35,12 +35,12 @@ func overrideJobCompute(j *resources.Job, compute string) {
func (m *overrideCompute) Apply(ctx context.Context, b *bundle.Bundle) error {
	if b.Config.Bundle.Mode != config.Development {
		if b.Config.Bundle.ComputeID != "" {
			return fmt.Errorf("cannot override compute for an environment that does not use 'mode: development'")
			return fmt.Errorf("cannot override compute for a target that does not use 'mode: development'")
		}
		return nil
	}
	if os.Getenv("DATABRICKS_CLUSTER_ID") != "" {
		b.Config.Bundle.ComputeID = os.Getenv("DATABRICKS_CLUSTER_ID")
	if v := env.Get(ctx, "DATABRICKS_CLUSTER_ID"); v != "" {
		b.Config.Bundle.ComputeID = v
	}

	if b.Config.Bundle.ComputeID == "" {
@ -2,7 +2,6 @@ package mutator_test

import (
	"context"
	"os"
	"testing"

	"github.com/databricks/cli/bundle"

@ -16,7 +15,7 @@ import (
)

func TestOverrideDevelopment(t *testing.T) {
	os.Setenv("DATABRICKS_CLUSTER_ID", "")
	t.Setenv("DATABRICKS_CLUSTER_ID", "")
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{

@ -34,6 +33,12 @@ func TestOverrideDevelopment(t *testing.T) {
					{
						ExistingClusterId: "cluster2",
					},
					{
						ComputeKey: "compute_key",
					},
					{
						JobClusterKey: "cluster_key",
					},
				},
			}},
		},

@ -47,10 +52,16 @@ func TestOverrideDevelopment(t *testing.T) {
	assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[2].ExistingClusterId)
	assert.Equal(t, "newClusterID", bundle.Config.Resources.Jobs["job1"].Tasks[3].ExistingClusterId)

	assert.Nil(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].NewCluster)
	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[2].ComputeKey)
	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[3].JobClusterKey)
}

func TestOverrideDevelopmentEnv(t *testing.T) {
	os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
	bundle := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{

@ -77,6 +88,31 @@ func TestOverrideDevelopmentEnv(t *testing.T) {
	assert.Equal(t, "cluster2", bundle.Config.Resources.Jobs["job1"].Tasks[1].ExistingClusterId)
}

func TestOverridePipelineTask(t *testing.T) {
	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
	bundle := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {JobSettings: &jobs.JobSettings{
						Name: "job1",
						Tasks: []jobs.Task{
							{
								PipelineTask: &jobs.PipelineTask{},
							},
						},
					}},
				},
			},
		},
	}

	m := mutator.OverrideCompute()
	err := m.Apply(context.Background(), bundle)
	require.NoError(t, err)
	assert.Empty(t, bundle.Config.Resources.Jobs["job1"].Tasks[0].ExistingClusterId)
}

func TestOverrideProduction(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{

@ -107,7 +143,7 @@ func TestOverrideProduction(t *testing.T) {
}

func TestOverrideProductionEnv(t *testing.T) {
	os.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
	t.Setenv("DATABRICKS_CLUSTER_ID", "newClusterId")
	bundle := &bundle.Bundle{
		Config: config.Root{
			Resources: config.Resources{
@ -2,8 +2,11 @@ package mutator

import (
	"context"
	"strings"
	"unicode"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
)

type populateCurrentUser struct{}

@ -18,12 +21,32 @@ func (m *populateCurrentUser) Name() string {
}

func (m *populateCurrentUser) Apply(ctx context.Context, b *bundle.Bundle) error {
	if b.Config.Workspace.CurrentUser != nil {
		return nil
	}

	w := b.WorkspaceClient()
	me, err := w.CurrentUser.Me(ctx)
	if err != nil {
		return err
	}

	b.Config.Workspace.CurrentUser = me
	b.Config.Workspace.CurrentUser = &config.User{
		ShortName: getShortUserName(me.UserName),
		User:      me,
	}
	return nil
}

// Get a short-form username, based on the user's primary email address.
// We leave the full range of unicode letters intact, but remove all "special"
// characters, including dots, which are not supported in e.g. experiment names.
func getShortUserName(emailAddress string) string {
	r := []rune(strings.Split(emailAddress, "@")[0])
	for i := 0; i < len(r); i++ {
		if !unicode.IsLetter(r[i]) {
			r[i] = '_'
		}
	}
	return string(r)
}
@ -1,3 +1,40 @@
package mutator

// We need to implement workspace client mocking to implement this test.
import "testing"

func TestPopulateCurrentUser(t *testing.T) {
	// We need to implement workspace client mocking to implement this test.
}

func TestGetShortUserName(t *testing.T) {
	tests := []struct {
		name     string
		email    string
		expected string
	}{
		{
			name:     "test alphanumeric characters",
			email:    "test.user@example.com",
			expected: "test_user",
		},
		{
			name:     "test unicode characters",
			email:    "tést.üser@example.com",
			expected: "tést_üser",
		},
		{
			name:     "test special characters",
			email:    "test$.user@example.com",
			expected: "test__user",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := getShortUserName(tt.email)
			if result != tt.expected {
				t.Errorf("getShortUserName(%q) = %q; expected %q", tt.email, result, tt.expected)
			}
		})
	}
}
@ -1,89 +0,0 @@
package mutator

import (
	"context"
	"fmt"
	"path"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/ml"
)

type processEnvironmentMode struct{}

const developmentConcurrentRuns = 4

func ProcessEnvironmentMode() bundle.Mutator {
	return &processEnvironmentMode{}
}

func (m *processEnvironmentMode) Name() string {
	return "ProcessEnvironmentMode"
}

// Mark all resources as being for 'development' purposes, i.e.
// changing their name, adding tags, and (in the future)
// marking them as 'hidden' in the UI.
func processDevelopmentMode(b *bundle.Bundle) error {
	r := b.Config.Resources

	for i := range r.Jobs {
		r.Jobs[i].Name = "[dev] " + r.Jobs[i].Name
		if r.Jobs[i].Tags == nil {
			r.Jobs[i].Tags = make(map[string]string)
		}
		r.Jobs[i].Tags["dev"] = ""
		if r.Jobs[i].MaxConcurrentRuns == 0 {
			r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
		}
		if r.Jobs[i].Schedule != nil {
			r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
		}
		if r.Jobs[i].Continuous != nil {
			r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
		}
		if r.Jobs[i].Trigger != nil {
			r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
		}
	}

	for i := range r.Pipelines {
		r.Pipelines[i].Name = "[dev] " + r.Pipelines[i].Name
		r.Pipelines[i].Development = true
		// (pipelines don't yet support tags)
	}

	for i := range r.Models {
		r.Models[i].Name = "[dev] " + r.Models[i].Name
		r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: ""})
	}

	for i := range r.Experiments {
		filepath := r.Experiments[i].Name
		dir := path.Dir(filepath)
		base := path.Base(filepath)
		if dir == "." {
			r.Experiments[i].Name = "[dev] " + base
		} else {
			r.Experiments[i].Name = dir + "/[dev] " + base
		}
		r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: ""})
	}

	return nil
}

func (m *processEnvironmentMode) Apply(ctx context.Context, b *bundle.Bundle) error {
	switch b.Config.Bundle.Mode {
	case config.Development:
		return processDevelopmentMode(b)
	case "":
		// No action
	default:
		return fmt.Errorf("unsupported value specified for 'mode': %s", b.Config.Bundle.Mode)
	}

	return nil
}
@ -1,77 +0,0 @@
package mutator_test

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/ml"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestProcessEnvironmentModeApplyDebug(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Mode: config.Development,
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
				},
				Pipelines: map[string]*resources.Pipeline{
					"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
				},
				Experiments: map[string]*resources.MlflowExperiment{
					"experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},
					"experiment2": {Experiment: &ml.Experiment{Name: "experiment2"}},
				},
				Models: map[string]*resources.MlflowModel{
					"model1": {Model: &ml.Model{Name: "model1"}},
				},
			},
		},
	}

	m := mutator.ProcessEnvironmentMode()
	err := m.Apply(context.Background(), bundle)
	require.NoError(t, err)
	assert.Equal(t, "[dev] job1", bundle.Config.Resources.Jobs["job1"].Name)
	assert.Equal(t, "[dev] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
	assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name)
	assert.Equal(t, "[dev] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name)
	assert.Equal(t, "[dev] model1", bundle.Config.Resources.Models["model1"].Name)
	assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key)
	assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}

func TestProcessEnvironmentModeApplyDefault(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Mode: "",
			},
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
				},
				Pipelines: map[string]*resources.Pipeline{
					"pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
				},
			},
		},
	}

	m := mutator.ProcessEnvironmentMode()
	err := m.Apply(context.Background(), bundle)
	require.NoError(t, err)
	assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name)
	assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
	assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}
@ -3,14 +3,25 @@ package mutator
import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "slices"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "golang.org/x/exp/slices"
    "github.com/databricks/cli/bundle/env"
)

// Get extra include paths from environment variable
func getExtraIncludePaths(ctx context.Context) []string {
    value, exists := env.Includes(ctx)
    if !exists {
        return nil
    }
    return strings.Split(value, string(os.PathListSeparator))
}

type processRootIncludes struct{}

// ProcessRootIncludes expands the patterns in the configuration's include list

@ -27,14 +38,28 @@ func (m *processRootIncludes) Apply(ctx context.Context, b *bundle.Bundle) error
    var out []bundle.Mutator

    // Map with files we've already seen to avoid loading them twice.
    var seen = map[string]bool{
        config.FileName: true,
    var seen = map[string]bool{}

    for _, file := range config.FileNames {
        seen[file] = true
    }

    // Maintain list of files in order of files being loaded.
    // This is stored in the bundle configuration for observability.
    var files []string

    // Converts extra include paths from environment variable to relative paths
    for _, extraIncludePath := range getExtraIncludePaths(ctx) {
        if filepath.IsAbs(extraIncludePath) {
            rel, err := filepath.Rel(b.Config.Path, extraIncludePath)
            if err != nil {
                return fmt.Errorf("unable to include file '%s': %w", extraIncludePath, err)
            }
            extraIncludePath = rel
        }
        b.Config.Include = append(b.Config.Include, extraIncludePath)
    }

    // For each glob, find all files to load.
    // Ordering of the list of globs is maintained in the output.
    // For matches that appear in multiple globs, only the first is kept.
@ -3,13 +3,16 @@ package mutator_test
import (
    "context"
    "os"
    "path"
    "path/filepath"
    "runtime"
    "strings"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/databricks/cli/bundle/env"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

@ -61,7 +64,7 @@ func TestProcessRootIncludesSingleGlob(t *testing.T) {
        },
    }

    touch(t, bundle.Config.Path, "bundle.yml")
    touch(t, bundle.Config.Path, "databricks.yml")
    touch(t, bundle.Config.Path, "a.yml")
    touch(t, bundle.Config.Path, "b.yml")

@ -122,3 +125,43 @@ func TestProcessRootIncludesNotExists(t *testing.T) {
    require.Error(t, err)
    assert.Contains(t, err.Error(), "notexist.yml defined in 'include' section does not match any files")
}

func TestProcessRootIncludesExtrasFromEnvVar(t *testing.T) {
    rootPath := t.TempDir()
    testYamlName := "extra_include_path.yml"
    touch(t, rootPath, testYamlName)
    t.Setenv(env.IncludesVariable, path.Join(rootPath, testYamlName))

    bundle := &bundle.Bundle{
        Config: config.Root{
            Path: rootPath,
        },
    }

    err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Contains(t, bundle.Config.Include, testYamlName)
}

func TestProcessRootIncludesDedupExtrasFromEnvVar(t *testing.T) {
    rootPath := t.TempDir()
    testYamlName := "extra_include_path.yml"
    touch(t, rootPath, testYamlName)
    t.Setenv(env.IncludesVariable, strings.Join(
        []string{
            path.Join(rootPath, testYamlName),
            path.Join(rootPath, testYamlName),
        },
        string(os.PathListSeparator),
    ))

    bundle := &bundle.Bundle{
        Config: config.Root{
            Path: rootPath,
        },
    }

    err := mutator.ProcessRootIncludes().Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, []string{testYamlName}, bundle.Config.Include)
}
@ -0,0 +1,178 @@
package mutator

import (
    "context"
    "fmt"
    "path"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/libs/auth"
    "github.com/databricks/cli/libs/log"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/databricks/databricks-sdk-go/service/ml"
)

type processTargetMode struct{}

const developmentConcurrentRuns = 4

func ProcessTargetMode() bundle.Mutator {
    return &processTargetMode{}
}

func (m *processTargetMode) Name() string {
    return "ProcessTargetMode"
}

// Mark all resources as being for 'development' purposes, i.e.
// changing their name, adding tags, and (in the future)
// marking them as 'hidden' in the UI.
func transformDevelopmentMode(b *bundle.Bundle) error {
    r := b.Config.Resources

    prefix := "[dev " + b.Config.Workspace.CurrentUser.ShortName + "] "

    for i := range r.Jobs {
        r.Jobs[i].Name = prefix + r.Jobs[i].Name
        if r.Jobs[i].Tags == nil {
            r.Jobs[i].Tags = make(map[string]string)
        }
        r.Jobs[i].Tags["dev"] = b.Config.Workspace.CurrentUser.DisplayName
        if r.Jobs[i].MaxConcurrentRuns == 0 {
            r.Jobs[i].MaxConcurrentRuns = developmentConcurrentRuns
        }
        if r.Jobs[i].Schedule != nil {
            r.Jobs[i].Schedule.PauseStatus = jobs.PauseStatusPaused
        }
        if r.Jobs[i].Continuous != nil {
            r.Jobs[i].Continuous.PauseStatus = jobs.PauseStatusPaused
        }
        if r.Jobs[i].Trigger != nil {
            r.Jobs[i].Trigger.PauseStatus = jobs.PauseStatusPaused
        }
    }

    for i := range r.Pipelines {
        r.Pipelines[i].Name = prefix + r.Pipelines[i].Name
        r.Pipelines[i].Development = true
        // (pipelines don't yet support tags)
    }

    for i := range r.Models {
        r.Models[i].Name = prefix + r.Models[i].Name
        r.Models[i].Tags = append(r.Models[i].Tags, ml.ModelTag{Key: "dev", Value: ""})
    }

    for i := range r.Experiments {
        filepath := r.Experiments[i].Name
        dir := path.Dir(filepath)
        base := path.Base(filepath)
        if dir == "." {
            r.Experiments[i].Name = prefix + base
        } else {
            r.Experiments[i].Name = dir + "/" + prefix + base
        }
        r.Experiments[i].Tags = append(r.Experiments[i].Tags, ml.ExperimentTag{Key: "dev", Value: b.Config.Workspace.CurrentUser.DisplayName})
    }

    for i := range r.ModelServingEndpoints {
        prefix = "dev_" + b.Config.Workspace.CurrentUser.ShortName + "_"
        r.ModelServingEndpoints[i].Name = prefix + r.ModelServingEndpoints[i].Name
        // (model serving doesn't yet support tags)
    }

    return nil
}

func validateDevelopmentMode(b *bundle.Bundle) error {
    if path := findIncorrectPath(b, config.Development); path != "" {
        return fmt.Errorf("%s must start with '~/' or contain the current username when using 'mode: development'", path)
    }
    return nil
}

func findIncorrectPath(b *bundle.Bundle, mode config.Mode) string {
    username := b.Config.Workspace.CurrentUser.UserName
    containsExpected := true
    if mode == config.Production {
        containsExpected = false
    }

    if strings.Contains(b.Config.Workspace.RootPath, username) != containsExpected && b.Config.Workspace.RootPath != "" {
        return "root_path"
    }
    if strings.Contains(b.Config.Workspace.StatePath, username) != containsExpected {
        return "state_path"
    }
    if strings.Contains(b.Config.Workspace.FilesPath, username) != containsExpected {
        return "files_path"
    }
    if strings.Contains(b.Config.Workspace.ArtifactsPath, username) != containsExpected {
        return "artifacts_path"
    }
    return ""
}

func validateProductionMode(ctx context.Context, b *bundle.Bundle, isPrincipalUsed bool) error {
    if b.Config.Bundle.Git.Inferred {
        env := b.Config.Bundle.Target
        log.Warnf(ctx, "target with 'mode: production' should specify an explicit 'targets.%s.git' configuration", env)
    }

    r := b.Config.Resources
    for i := range r.Pipelines {
        if r.Pipelines[i].Development {
            return fmt.Errorf("target with 'mode: production' cannot specify a pipeline with 'development: true'")
        }
    }

    if !isPrincipalUsed {
        if path := findIncorrectPath(b, config.Production); path != "" {
            message := "%s must not contain the current username when using 'mode: production'"
            if path == "root_path" {
                return fmt.Errorf(message+"\n tip: set workspace.root_path to a shared path such as /Shared/.bundle/${bundle.name}/${bundle.target}", path)
            } else {
                return fmt.Errorf(message, path)
            }
        }

        if !isRunAsSet(r) {
            return fmt.Errorf("'run_as' must be set for all jobs when using 'mode: production'")
        }
    }
    return nil
}

// Determines whether run_as is explicitly set for all resources.
// We do this in a best-effort fashion rather than check the top-level
// 'run_as' field because the latter is not required to be set.
func isRunAsSet(r config.Resources) bool {
    for i := range r.Jobs {
        if r.Jobs[i].RunAs == nil {
            return false
        }
    }
    return true
}

func (m *processTargetMode) Apply(ctx context.Context, b *bundle.Bundle) error {
    switch b.Config.Bundle.Mode {
    case config.Development:
        err := validateDevelopmentMode(b)
        if err != nil {
            return err
        }
        return transformDevelopmentMode(b)
    case config.Production:
        isPrincipal := auth.IsServicePrincipal(b.Config.Workspace.CurrentUser.Id)
        return validateProductionMode(ctx, b, isPrincipal)
    case "":
        // No action
    default:
        return fmt.Errorf("unsupported value '%s' specified for 'mode': must be either 'development' or 'production'", b.Config.Bundle.Mode)
    }

    return nil
}
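
A small sketch of the path validation above; checkPaths is a hypothetical illustration, everything it calls comes from this file. In development mode every workspace path must contain the username, in production mode none may:

// Sketch, not part of the commit: exercising findIncorrectPath.
package mutator

import (
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/databricks-sdk-go/service/iam"
)

func checkPaths() {
    b := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                CurrentUser:   &config.User{User: &iam.User{UserName: "lennart@company.com"}},
                StatePath:     "/Users/lennart@company.com/.bundle/state",
                FilesPath:     "/Users/lennart@company.com/.bundle/files",
                ArtifactsPath: "/Users/lennart@company.com/.bundle/artifacts",
            },
        },
    }
    // All paths are user-scoped, so development mode is satisfied.
    fmt.Println(findIncorrectPath(b, config.Development)) // ""
    // Production mode requires shared paths; the first offending field is named.
    fmt.Println(findIncorrectPath(b, config.Production)) // "state_path"
}
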
@ -0,0 +1,187 @@
package mutator

import (
    "context"
    "reflect"
    "strings"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/iam"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/databricks/databricks-sdk-go/service/ml"
    "github.com/databricks/databricks-sdk-go/service/pipelines"
    "github.com/databricks/databricks-sdk-go/service/serving"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
)

func mockBundle(mode config.Mode) *bundle.Bundle {
    return &bundle.Bundle{
        Config: config.Root{
            Bundle: config.Bundle{
                Mode: mode,
                Git: config.Git{
                    OriginURL: "http://origin",
                    Branch:    "main",
                },
            },
            Workspace: config.Workspace{
                CurrentUser: &config.User{
                    ShortName: "lennart",
                    User: &iam.User{
                        UserName: "lennart@company.com",
                        Id:       "1",
                    },
                },
                StatePath:     "/Users/lennart@company.com/.bundle/x/y/state",
                ArtifactsPath: "/Users/lennart@company.com/.bundle/x/y/artifacts",
                FilesPath:     "/Users/lennart@company.com/.bundle/x/y/files",
            },
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
                },
                Pipelines: map[string]*resources.Pipeline{
                    "pipeline1": {PipelineSpec: &pipelines.PipelineSpec{Name: "pipeline1"}},
                },
                Experiments: map[string]*resources.MlflowExperiment{
                    "experiment1": {Experiment: &ml.Experiment{Name: "/Users/lennart.kats@databricks.com/experiment1"}},
                    "experiment2": {Experiment: &ml.Experiment{Name: "experiment2"}},
                },
                Models: map[string]*resources.MlflowModel{
                    "model1": {Model: &ml.Model{Name: "model1"}},
                },
                ModelServingEndpoints: map[string]*resources.ModelServingEndpoint{
                    "servingendpoint1": {CreateServingEndpoint: &serving.CreateServingEndpoint{Name: "servingendpoint1"}},
                },
            },
        },
    }
}

func TestProcessTargetModeDevelopment(t *testing.T) {
    bundle := mockBundle(config.Development)

    m := ProcessTargetMode()
    err := m.Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, "[dev lennart] job1", bundle.Config.Resources.Jobs["job1"].Name)
    assert.Equal(t, "[dev lennart] pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
    assert.Equal(t, "/Users/lennart.kats@databricks.com/[dev lennart] experiment1", bundle.Config.Resources.Experiments["experiment1"].Name)
    assert.Equal(t, "[dev lennart] experiment2", bundle.Config.Resources.Experiments["experiment2"].Name)
    assert.Equal(t, "[dev lennart] model1", bundle.Config.Resources.Models["model1"].Name)
    assert.Equal(t, "dev_lennart_servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
    assert.Equal(t, "dev", bundle.Config.Resources.Experiments["experiment1"].Experiment.Tags[0].Key)
    assert.True(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
}

func TestProcessTargetModeDefault(t *testing.T) {
    bundle := mockBundle("")

    m := ProcessTargetMode()
    err := m.Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name)
    assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
    assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
    assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
}

func TestProcessTargetModeProduction(t *testing.T) {
    bundle := mockBundle(config.Production)

    err := validateProductionMode(context.Background(), bundle, false)
    require.ErrorContains(t, err, "state_path")

    bundle.Config.Workspace.StatePath = "/Shared/.bundle/x/y/state"
    bundle.Config.Workspace.ArtifactsPath = "/Shared/.bundle/x/y/artifacts"
    bundle.Config.Workspace.FilesPath = "/Shared/.bundle/x/y/files"

    err = validateProductionMode(context.Background(), bundle, false)
    require.ErrorContains(t, err, "production")

    permissions := []resources.Permission{
        {
            Level:    "CAN_MANAGE",
            UserName: "user@company.com",
        },
    }
    bundle.Config.Resources.Jobs["job1"].Permissions = permissions
    bundle.Config.Resources.Jobs["job1"].RunAs = &jobs.JobRunAs{UserName: "user@company.com"}
    bundle.Config.Resources.Pipelines["pipeline1"].Permissions = permissions
    bundle.Config.Resources.Experiments["experiment1"].Permissions = permissions
    bundle.Config.Resources.Experiments["experiment2"].Permissions = permissions
    bundle.Config.Resources.Models["model1"].Permissions = permissions
    bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Permissions = permissions

    err = validateProductionMode(context.Background(), bundle, false)
    require.NoError(t, err)

    assert.Equal(t, "job1", bundle.Config.Resources.Jobs["job1"].Name)
    assert.Equal(t, "pipeline1", bundle.Config.Resources.Pipelines["pipeline1"].Name)
    assert.False(t, bundle.Config.Resources.Pipelines["pipeline1"].PipelineSpec.Development)
    assert.Equal(t, "servingendpoint1", bundle.Config.Resources.ModelServingEndpoints["servingendpoint1"].Name)
}

func TestProcessTargetModeProductionOkForPrincipal(t *testing.T) {
    bundle := mockBundle(config.Production)

    // Our target has all kinds of problems when not using service principals ...
    err := validateProductionMode(context.Background(), bundle, false)
    require.Error(t, err)

    // ... but we're much less strict when a principal is used
    err = validateProductionMode(context.Background(), bundle, true)
    require.NoError(t, err)
}

// Make sure that we have test coverage for all resource types
func TestAllResourcesMocked(t *testing.T) {
    bundle := mockBundle(config.Development)
    resources := reflect.ValueOf(bundle.Config.Resources)

    for i := 0; i < resources.NumField(); i++ {
        field := resources.Field(i)
        if field.Kind() == reflect.Map {
            assert.True(
                t,
                !field.IsNil() && field.Len() > 0,
                "process_target_mode should support '%s' (please add it to process_target_mode.go and extend the test suite)",
                resources.Type().Field(i).Name,
            )
        }
    }
}

// Make sure that we at least rename all resources
func TestAllResourcesRenamed(t *testing.T) {
    bundle := mockBundle(config.Development)
    resources := reflect.ValueOf(bundle.Config.Resources)

    m := ProcessTargetMode()
    err := m.Apply(context.Background(), bundle)
    require.NoError(t, err)

    for i := 0; i < resources.NumField(); i++ {
        field := resources.Field(i)

        if field.Kind() == reflect.Map {
            for _, key := range field.MapKeys() {
                resource := field.MapIndex(key)
                nameField := resource.Elem().FieldByName("Name")
                if nameField.IsValid() && nameField.Kind() == reflect.String {
                    assert.True(
                        t,
                        strings.Contains(nameField.String(), "dev"),
                        "process_target_mode should rename '%s' in '%s'",
                        key,
                        resources.Type().Field(i).Name,
                    )
                }
            }
        }
    }
}
@ -0,0 +1,65 @@
package mutator

import (
    "context"
    "slices"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/jobs"
)

type setRunAs struct {
}

// The SetRunAs mutator goes over defined resources such as jobs and DLT pipelines
// and sets the correct execution identity ("run_as" for a job, or the "IS_OWNER"
// permission for DLT) if a top-level "run_as" section is defined in the configuration.
func SetRunAs() bundle.Mutator {
    return &setRunAs{}
}

func (m *setRunAs) Name() string {
    return "SetRunAs"
}

func (m *setRunAs) Apply(_ context.Context, b *bundle.Bundle) error {
    runAs := b.Config.RunAs
    if runAs == nil {
        return nil
    }

    for i := range b.Config.Resources.Jobs {
        job := b.Config.Resources.Jobs[i]
        if job.RunAs != nil {
            continue
        }
        job.RunAs = &jobs.JobRunAs{
            ServicePrincipalName: runAs.ServicePrincipalName,
            UserName:             runAs.UserName,
        }
    }

    me := b.Config.Workspace.CurrentUser.UserName
    // If the user deploying the bundle and the one defined in run_as are the same,
    // do not add the IS_OWNER permission; the current user is implied to be an owner in this case.
    // Otherwise, it will fail due to this bug https://github.com/databricks/terraform-provider-databricks/issues/2407
    if runAs.UserName == me || runAs.ServicePrincipalName == me {
        return nil
    }

    for i := range b.Config.Resources.Pipelines {
        pipeline := b.Config.Resources.Pipelines[i]
        pipeline.Permissions = slices.DeleteFunc(pipeline.Permissions, func(p resources.Permission) bool {
            return (runAs.ServicePrincipalName != "" && p.ServicePrincipalName == runAs.ServicePrincipalName) ||
                (runAs.UserName != "" && p.UserName == runAs.UserName)
        })
        pipeline.Permissions = append(pipeline.Permissions, resources.Permission{
            Level:                "IS_OWNER",
            ServicePrincipalName: runAs.ServicePrincipalName,
            UserName:             runAs.UserName,
        })
    }

    return nil
}
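
A hedged sketch of the behavior: assuming the top-level run_as field on config.Root has the same shape as jobs.JobRunAs (which the field-by-field copy above suggests, but is not confirmed here), a job without its own run_as inherits the bundle-level identity:

// Sketch, not part of the commit: run_as propagation to a job.
package mutator

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/iam"
    "github.com/databricks/databricks-sdk-go/service/jobs"
)

func runAsExample() {
    b := &bundle.Bundle{
        Config: config.Root{
            RunAs: &jobs.JobRunAs{ServicePrincipalName: "sp-application-id"}, // field type is an assumption
            Workspace: config.Workspace{
                CurrentUser: &config.User{User: &iam.User{UserName: "lennart@company.com"}},
            },
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "job1": {JobSettings: &jobs.JobSettings{Name: "job1"}},
                },
            },
        },
    }
    _ = bundle.Apply(context.Background(), b, SetRunAs())
    // The job now runs as the service principal; since the deploying user
    // differs from run_as, any pipelines would also gain IS_OWNER.
    fmt.Println(b.Config.Resources.Jobs["job1"].RunAs.ServicePrincipalName) // "sp-application-id"
}
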
@ -1,54 +0,0 @@
package mutator

import (
    "context"
    "fmt"
    "strings"

    "github.com/databricks/cli/bundle"
    "golang.org/x/exp/maps"
)

type selectDefaultEnvironment struct{}

// SelectDefaultEnvironment merges the default environment into the root configuration.
func SelectDefaultEnvironment() bundle.Mutator {
    return &selectDefaultEnvironment{}
}

func (m *selectDefaultEnvironment) Name() string {
    return "SelectDefaultEnvironment"
}

func (m *selectDefaultEnvironment) Apply(ctx context.Context, b *bundle.Bundle) error {
    if len(b.Config.Environments) == 0 {
        return fmt.Errorf("no environments defined")
    }

    // One environment means there's only one default.
    names := maps.Keys(b.Config.Environments)
    if len(names) == 1 {
        return SelectEnvironment(names[0]).Apply(ctx, b)
    }

    // Multiple environments means we look for the `default` flag.
    var defaults []string
    for name, env := range b.Config.Environments {
        if env != nil && env.Default {
            defaults = append(defaults, name)
        }
    }

    // It is invalid to have multiple environments with the `default` flag set.
    if len(defaults) > 1 {
        return fmt.Errorf("multiple environments are marked as default (%s)", strings.Join(defaults, ", "))
    }

    // If no environment has the `default` flag set, ask the user to specify one.
    if len(defaults) == 0 {
        return fmt.Errorf("please specify environment")
    }

    // One default remaining.
    return SelectEnvironment(defaults[0]).Apply(ctx, b)
}
@ -1,90 +0,0 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/stretchr/testify/assert"
)

func TestSelectDefaultEnvironmentNoEnvironments(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{},
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "no environments defined")
}

func TestSelectDefaultEnvironmentSingleEnvironments(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
                "foo": {},
            },
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.NoError(t, err)
    assert.Equal(t, "foo", bundle.Config.Bundle.Environment)
}

func TestSelectDefaultEnvironmentNoDefaults(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
                "foo": {},
                "bar": {},
                "qux": {},
            },
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "please specify environment")
}

func TestSelectDefaultEnvironmentNoDefaultsWithNil(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
                "foo": nil,
                "bar": nil,
            },
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "please specify environment")
}

func TestSelectDefaultEnvironmentMultipleDefaults(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
                "foo": {Default: true},
                "bar": {Default: true},
                "qux": {Default: true},
            },
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "multiple environments are marked as default")
}

func TestSelectDefaultEnvironmentSingleDefault(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
                "foo": {},
                "bar": {Default: true},
                "qux": {},
            },
        },
    }
    err := mutator.SelectDefaultEnvironment().Apply(context.Background(), bundle)
    assert.NoError(t, err)
    assert.Equal(t, "bar", bundle.Config.Bundle.Environment)
}
@ -0,0 +1,54 @@
package mutator

import (
    "context"
    "fmt"
    "strings"

    "github.com/databricks/cli/bundle"
    "golang.org/x/exp/maps"
)

type selectDefaultTarget struct{}

// SelectDefaultTarget merges the default target into the root configuration.
func SelectDefaultTarget() bundle.Mutator {
    return &selectDefaultTarget{}
}

func (m *selectDefaultTarget) Name() string {
    return "SelectDefaultTarget"
}

func (m *selectDefaultTarget) Apply(ctx context.Context, b *bundle.Bundle) error {
    if len(b.Config.Targets) == 0 {
        return fmt.Errorf("no targets defined")
    }

    // One target means there's only one default.
    names := maps.Keys(b.Config.Targets)
    if len(names) == 1 {
        return SelectTarget(names[0]).Apply(ctx, b)
    }

    // Multiple targets means we look for the `default` flag.
    var defaults []string
    for name, env := range b.Config.Targets {
        if env != nil && env.Default {
            defaults = append(defaults, name)
        }
    }

    // It is invalid to have multiple targets with the `default` flag set.
    if len(defaults) > 1 {
        return fmt.Errorf("multiple targets are marked as default (%s)", strings.Join(defaults, ", "))
    }

    // If no target has the `default` flag set, ask the user to specify one.
    if len(defaults) == 0 {
        return fmt.Errorf("please specify target")
    }

    // One default remaining.
    return SelectTarget(defaults[0]).Apply(ctx, b)
}
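
A minimal sketch of the resolution rules above, assuming config.Target carries the Default flag as used here; defaultTargetExample is a hypothetical caller:

// Sketch, not part of the commit: default-target resolution.
package mutator

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
)

func defaultTargetExample() {
    b := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "dev":  {Default: true},
                "prod": {},
            },
        },
    }
    // Two targets, exactly one marked default: "dev" is selected and merged.
    err := bundle.Apply(context.Background(), b, SelectDefaultTarget())
    fmt.Println(b.Config.Bundle.Target, err) // dev <nil>
}
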
@ -0,0 +1,90 @@
package mutator_test

import (
    "context"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/mutator"
    "github.com/stretchr/testify/assert"
)

func TestSelectDefaultTargetNoTargets(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{},
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "no targets defined")
}

func TestSelectDefaultTargetSingleTargets(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "foo": {},
            },
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.NoError(t, err)
    assert.Equal(t, "foo", bundle.Config.Bundle.Target)
}

func TestSelectDefaultTargetNoDefaults(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "foo": {},
                "bar": {},
                "qux": {},
            },
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "please specify target")
}

func TestSelectDefaultTargetNoDefaultsWithNil(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "foo": nil,
                "bar": nil,
            },
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "please specify target")
}

func TestSelectDefaultTargetMultipleDefaults(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "foo": {Default: true},
                "bar": {Default: true},
                "qux": {Default: true},
            },
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.ErrorContains(t, err, "multiple targets are marked as default")
}

func TestSelectDefaultTargetSingleDefault(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{
                "foo": {},
                "bar": {Default: true},
                "qux": {},
            },
        },
    }
    err := mutator.SelectDefaultTarget().Apply(context.Background(), bundle)
    assert.NoError(t, err)
    assert.Equal(t, "bar", bundle.Config.Bundle.Target)
}
@ -1,48 +0,0 @@
package mutator

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
)

type selectEnvironment struct {
    name string
}

// SelectEnvironment merges the specified environment into the root configuration.
func SelectEnvironment(name string) bundle.Mutator {
    return &selectEnvironment{
        name: name,
    }
}

func (m *selectEnvironment) Name() string {
    return fmt.Sprintf("SelectEnvironment(%s)", m.name)
}

func (m *selectEnvironment) Apply(_ context.Context, b *bundle.Bundle) error {
    if b.Config.Environments == nil {
        return fmt.Errorf("no environments defined")
    }

    // Get specified environment
    env, ok := b.Config.Environments[m.name]
    if !ok {
        return fmt.Errorf("%s: no such environment", m.name)
    }

    // Merge specified environment into root configuration structure.
    err := b.Config.MergeEnvironment(env)
    if err != nil {
        return err
    }

    // Store specified environment in configuration for reference.
    b.Config.Bundle.Environment = m.name

    // Clear environments after loading.
    b.Config.Environments = nil
    return nil
}
@ -0,0 +1,56 @@
package mutator

import (
    "context"
    "fmt"
    "strings"

    "github.com/databricks/cli/bundle"
    "golang.org/x/exp/maps"
)

type selectTarget struct {
    name string
}

// SelectTarget merges the specified target into the root configuration.
func SelectTarget(name string) bundle.Mutator {
    return &selectTarget{
        name: name,
    }
}

func (m *selectTarget) Name() string {
    return fmt.Sprintf("SelectTarget(%s)", m.name)
}

func (m *selectTarget) Apply(_ context.Context, b *bundle.Bundle) error {
    if b.Config.Targets == nil {
        return fmt.Errorf("no targets defined")
    }

    // Get specified target
    target, ok := b.Config.Targets[m.name]
    if !ok {
        return fmt.Errorf("%s: no such target. Available targets: %s", m.name, strings.Join(maps.Keys(b.Config.Targets), ", "))
    }

    // Merge specified target into root configuration structure.
    err := b.Config.MergeTargetOverrides(target)
    if err != nil {
        return err
    }

    // Store specified target in configuration for reference.
    b.Config.Bundle.Target = m.name

    // We do this for backward compatibility.
    // TODO: remove when Environments section is not supported anymore.
    b.Config.Bundle.Environment = b.Config.Bundle.Target

    // Clear targets after loading.
    b.Config.Targets = nil
    b.Config.Environments = nil

    return nil
}
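
A short sketch of the improved error path: selecting a target that does not exist now lists the available names (map iteration order, so unsorted). selectTargetExample is a hypothetical caller:

// Sketch, not part of the commit: the not-found error message.
package mutator

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
)

func selectTargetExample() {
    b := &bundle.Bundle{
        Config: config.Root{
            Targets: map[string]*config.Target{"dev": {}, "prod": {}},
        },
    }
    err := bundle.Apply(context.Background(), b, SelectTarget("staging"))
    // Prints something like:
    // staging: no such target. Available targets: dev, prod
    fmt.Println(err)
}
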
@ -11,13 +11,13 @@ import (
    "github.com/stretchr/testify/require"
)

func TestSelectEnvironment(t *testing.T) {
func TestSelectTarget(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Workspace: config.Workspace{
                Host: "foo",
            },
            Environments: map[string]*config.Environment{
            Targets: map[string]*config.Target{
                "default": {
                    Workspace: &config.Workspace{
                        Host: "bar",

@ -26,19 +26,19 @@ func TestSelectEnvironment(t *testing.T) {
            },
        },
    }
    err := mutator.SelectEnvironment("default").Apply(context.Background(), bundle)
    err := mutator.SelectTarget("default").Apply(context.Background(), bundle)
    require.NoError(t, err)
    assert.Equal(t, "bar", bundle.Config.Workspace.Host)
}

func TestSelectEnvironmentNotFound(t *testing.T) {
func TestSelectTargetNotFound(t *testing.T) {
    bundle := &bundle.Bundle{
        Config: config.Root{
            Environments: map[string]*config.Environment{
            Targets: map[string]*config.Target{
                "default": {},
            },
        },
    }
    err := mutator.SelectEnvironment("doesnt-exist").Apply(context.Background(), bundle)
    require.Error(t, err, "no environments defined")
    err := mutator.SelectTarget("doesnt-exist").Apply(context.Background(), bundle)
    require.Error(t, err, "no targets defined")
}
@ -3,10 +3,10 @@ package mutator
import (
    "context"
    "fmt"
    "os"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config/variable"
    "github.com/databricks/cli/libs/env"
)

const bundleVarPrefix = "BUNDLE_VAR_"

@ -21,7 +21,7 @@ func (m *setVariables) Name() string {
    return "SetVariables"
}

func setVariable(v *variable.Variable, name string) error {
func setVariable(ctx context.Context, v *variable.Variable, name string) error {
    // case: variable already has value initialized, so skip
    if v.HasValue() {
        return nil

@ -29,7 +29,7 @@ func setVariable(v *variable.Variable, name string) error {

    // case: read and set variable value from process environment
    envVarName := bundleVarPrefix + name
    if val, ok := os.LookupEnv(envVarName); ok {
    if val, ok := env.Lookup(ctx, envVarName); ok {
        err := v.Set(val)
        if err != nil {
            return fmt.Errorf(`failed to assign value "%s" to variable %s from environment variable %s with error: %w`, val, name, envVarName, err)

@ -54,7 +54,7 @@ func setVariable(v *variable.Variable, name string) error {

func (m *setVariables) Apply(ctx context.Context, b *bundle.Bundle) error {
    for name, variable := range b.Config.Variables {
        err := setVariable(variable, name)
        err := setVariable(ctx, variable, name)
        if err != nil {
            return err
        }
@ -21,7 +21,7 @@ func TestSetVariableFromProcessEnvVar(t *testing.T) {
    // set value for variable as an environment variable
    t.Setenv("BUNDLE_VAR_foo", "process-env")

    err := setVariable(&variable, "foo")
    err := setVariable(context.Background(), &variable, "foo")
    require.NoError(t, err)
    assert.Equal(t, *variable.Value, "process-env")
}

@ -33,7 +33,7 @@ func TestSetVariableUsingDefaultValue(t *testing.T) {
        Default: &defaultVal,
    }

    err := setVariable(&variable, "foo")
    err := setVariable(context.Background(), &variable, "foo")
    require.NoError(t, err)
    assert.Equal(t, *variable.Value, "default")
}

@ -49,7 +49,7 @@ func TestSetVariableWhenAlreadyAValueIsAssigned(t *testing.T) {

    // since a value is already assigned to the variable, it would not be overridden
    // by the default value
    err := setVariable(&variable, "foo")
    err := setVariable(context.Background(), &variable, "foo")
    require.NoError(t, err)
    assert.Equal(t, *variable.Value, "assigned-value")
}

@ -68,7 +68,7 @@ func TestSetVariableEnvVarValueDoesNotOverridePresetValue(t *testing.T) {

    // since a value is already assigned to the variable, it would not be overridden
    // by the value from environment
    err := setVariable(&variable, "foo")
    err := setVariable(context.Background(), &variable, "foo")
    require.NoError(t, err)
    assert.Equal(t, *variable.Value, "assigned-value")
}

@ -79,7 +79,7 @@ func TestSetVariablesErrorsIfAValueCouldNotBeResolved(t *testing.T) {
    }

    // fails because we could not resolve a value for the variable
    err := setVariable(&variable, "foo")
    err := setVariable(context.Background(), &variable, "foo")
    assert.ErrorContains(t, err, "no value assigned to required variable foo. Assignment can be done through the \"--var\" flag or by setting the BUNDLE_VAR_foo environment variable")
}
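
Taken together, the tests above pin down a precedence order; a compact sketch (variablePrecedence is a hypothetical caller, the functions and types are the ones under test):

// Sketch, not part of the commit: resolution order in setVariable.
package mutator

import (
    "context"
    "fmt"

    "github.com/databricks/cli/bundle/config/variable"
)

func variablePrecedence() {
    defaultVal := "fallback"
    v := variable.Variable{Default: &defaultVal}

    // 1. An already-assigned value always wins.
    // 2. Otherwise BUNDLE_VAR_<name> from the environment is used.
    // 3. Otherwise the declared default applies; here nothing else is set, so:
    _ = setVariable(context.Background(), &v, "region")
    fmt.Println(*v.Value) // "fallback"
}
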
@ -0,0 +1,100 @@
package mutator

import (
    "context"
    "fmt"
    "os"
    "path"
    "path/filepath"
    "text/template"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/databricks-sdk-go/service/jobs"
)

type TaskWithJobKey struct {
    Task   *jobs.Task
    JobKey string
}

type TrampolineFunctions interface {
    GetTemplateData(task *jobs.Task) (map[string]any, error)
    GetTasks(b *bundle.Bundle) []TaskWithJobKey
    CleanUp(task *jobs.Task) error
}

type trampoline struct {
    name      string
    functions TrampolineFunctions
    template  string
}

func NewTrampoline(
    name string,
    functions TrampolineFunctions,
    template string,
) *trampoline {
    return &trampoline{name, functions, template}
}

func (m *trampoline) Name() string {
    return fmt.Sprintf("trampoline(%s)", m.name)
}

func (m *trampoline) Apply(ctx context.Context, b *bundle.Bundle) error {
    tasks := m.functions.GetTasks(b)
    for _, task := range tasks {
        err := m.generateNotebookWrapper(ctx, b, task)
        if err != nil {
            return err
        }
    }
    return nil
}

func (m *trampoline) generateNotebookWrapper(ctx context.Context, b *bundle.Bundle, task TaskWithJobKey) error {
    internalDir, err := b.InternalDir(ctx)
    if err != nil {
        return err
    }

    notebookName := fmt.Sprintf("notebook_%s_%s", task.JobKey, task.Task.TaskKey)
    localNotebookPath := filepath.Join(internalDir, notebookName+".py")

    err = os.MkdirAll(filepath.Dir(localNotebookPath), 0755)
    if err != nil {
        return err
    }

    f, err := os.Create(localNotebookPath)
    if err != nil {
        return err
    }
    defer f.Close()

    data, err := m.functions.GetTemplateData(task.Task)
    if err != nil {
        return err
    }

    t, err := template.New(notebookName).Parse(m.template)
    if err != nil {
        return err
    }

    internalDirRel, err := filepath.Rel(b.Config.Path, internalDir)
    if err != nil {
        return err
    }

    err = m.functions.CleanUp(task.Task)
    if err != nil {
        return err
    }
    remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(internalDirRel), notebookName)

    task.Task.NotebookTask = &jobs.NotebookTask{
        NotebookPath: remotePath,
    }

    return t.Execute(f, data)
}
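
The template passed to NewTrampoline is ordinary text/template input rendered with the map from GetTemplateData. A hypothetical wheel wrapper for illustration; the keys PackageName and EntryPoint are assumptions here, not the CLI's actual template:

// Sketch, not part of the commit: an illustrative trampoline template.
package mutator

// generateNotebookWrapper renders a template like this one with the data map
// returned by GetTemplateData, then writes the result to
// .databricks/bundle/<target>/.internal/notebook_<job key>_<task key>.py.
const exampleWheelTemplate = `# Databricks notebook source
from {{.PackageName}} import {{.EntryPoint}}

{{.EntryPoint}}()
`
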
@ -0,0 +1,98 @@
package mutator

import (
    "context"
    "fmt"
    "os"
    "path/filepath"
    "testing"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
    "github.com/databricks/cli/bundle/config/paths"
    "github.com/databricks/cli/bundle/config/resources"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/stretchr/testify/require"
)

type functions struct{}

func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey {
    tasks := make([]TaskWithJobKey, 0)
    for k := range b.Config.Resources.Jobs["test"].Tasks {
        tasks = append(tasks, TaskWithJobKey{
            JobKey: "test",
            Task:   &b.Config.Resources.Jobs["test"].Tasks[k],
        })
    }

    return tasks
}

func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) {
    if task.PythonWheelTask == nil {
        return nil, fmt.Errorf("PythonWheelTask cannot be nil")
    }

    data := make(map[string]any)
    data["MyName"] = "Trampoline"
    return data, nil
}

func (f *functions) CleanUp(task *jobs.Task) error {
    task.PythonWheelTask = nil
    return nil
}

func TestGenerateTrampoline(t *testing.T) {
    tmpDir := t.TempDir()

    tasks := []jobs.Task{
        {
            TaskKey: "to_trampoline",
            PythonWheelTask: &jobs.PythonWheelTask{
                PackageName: "test",
                EntryPoint:  "run",
            }},
    }

    b := &bundle.Bundle{
        Config: config.Root{
            Path: tmpDir,
            Bundle: config.Bundle{
                Target: "development",
            },
            Resources: config.Resources{
                Jobs: map[string]*resources.Job{
                    "test": {
                        Paths: paths.Paths{
                            ConfigFilePath: tmpDir,
                        },
                        JobSettings: &jobs.JobSettings{
                            Tasks: tasks,
                        },
                    },
                },
            },
        },
    }
    ctx := context.Background()

    funcs := functions{}
    trampoline := NewTrampoline("test_trampoline", &funcs, "Hello from {{.MyName}}")
    err := bundle.Apply(ctx, b, trampoline)
    require.NoError(t, err)

    dir, err := b.InternalDir(ctx)
    require.NoError(t, err)
    filename := filepath.Join(dir, "notebook_test_to_trampoline.py")

    bytes, err := os.ReadFile(filename)
    require.NoError(t, err)

    require.Equal(t, "Hello from Trampoline", string(bytes))

    task := b.Config.Resources.Jobs["test"].Tasks[0]
    require.Equal(t, task.NotebookTask.NotebookPath, ".databricks/bundle/development/.internal/notebook_test_to_trampoline")
    require.Nil(t, task.PythonWheelTask)
}
@ -4,6 +4,7 @@ import (
    "context"
    "errors"
    "fmt"
    "net/url"
    "os"
    "path"
    "path/filepath"

@ -11,8 +12,6 @@ import (

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/libs/notebook"
    "github.com/databricks/databricks-sdk-go/service/jobs"
    "github.com/databricks/databricks-sdk-go/service/pipelines"
)

type ErrIsNotebook struct {

@ -44,7 +43,9 @@ func (m *translatePaths) Name() string {
    return "TranslatePaths"
}

// rewritePath converts a given relative path to a stable remote workspace path.
type rewriteFunc func(literal, localFullPath, localRelPath, remotePath string) (string, error)

// rewritePath converts a given relative path from the loaded config to a new path based on the passed rewriting function
//
// It takes these arguments:
//   - The argument `dir` is the directory relative to which the given relative path is.

@ -57,13 +58,23 @@ func (m *translatePaths) rewritePath(
    dir string,
    b *bundle.Bundle,
    p *string,
    fn func(literal, localPath, remotePath string) (string, error),
    fn rewriteFunc,
) error {
    // We assume absolute paths point to a location in the workspace
    if path.IsAbs(filepath.ToSlash(*p)) {
        return nil
    }

    url, err := url.Parse(*p)
    if err != nil {
        return err
    }

    // If the file path has a scheme, it's a full path and we don't need to transform it
    if url.Scheme != "" {
        return nil
    }

    // Local path is relative to the directory the resource was defined in.
    localPath := filepath.Join(dir, filepath.FromSlash(*p))
    if interp, ok := m.seen[localPath]; ok {

@ -72,19 +83,19 @@ func (m *translatePaths) rewritePath(
    }

    // Remote path must be relative to the bundle root.
    remotePath, err := filepath.Rel(b.Config.Path, localPath)
    localRelPath, err := filepath.Rel(b.Config.Path, localPath)
    if err != nil {
        return err
    }
    if strings.HasPrefix(remotePath, "..") {
    if strings.HasPrefix(localRelPath, "..") {
        return fmt.Errorf("path %s is not contained in bundle root path", localPath)
    }

    // Prefix remote path with its remote root path.
    remotePath = path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(remotePath))
    remotePath := path.Join(b.Config.Workspace.FilesPath, filepath.ToSlash(localRelPath))

    // Convert local path into workspace path via specified function.
    interp, err := fn(*p, localPath, filepath.ToSlash(remotePath))
    interp, err := fn(*p, localPath, localRelPath, filepath.ToSlash(remotePath))
    if err != nil {
        return err
    }

@ -94,81 +105,69 @@ func (m *translatePaths) rewritePath(
    return nil
}

func (m *translatePaths) translateNotebookPath(literal, localPath, remotePath string) (string, error) {
    nb, _, err := notebook.Detect(localPath)
func translateNotebookPath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
    nb, _, err := notebook.Detect(localFullPath)
    if os.IsNotExist(err) {
        return "", fmt.Errorf("notebook %s not found", literal)
    }
    if err != nil {
        return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localPath, err)
        return "", fmt.Errorf("unable to determine if %s is a notebook: %w", localFullPath, err)
    }
    if !nb {
        return "", ErrIsNotNotebook{localPath}
        return "", ErrIsNotNotebook{localFullPath}
    }

    // Upon import, notebooks are stripped of their extension.
    return strings.TrimSuffix(remotePath, filepath.Ext(localPath)), nil
    return strings.TrimSuffix(remotePath, filepath.Ext(localFullPath)), nil
}

func (m *translatePaths) translateFilePath(literal, localPath, remotePath string) (string, error) {
    nb, _, err := notebook.Detect(localPath)
func translateFilePath(literal, localFullPath, localRelPath, remotePath string) (string, error) {
    nb, _, err := notebook.Detect(localFullPath)
    if os.IsNotExist(err) {
        return "", fmt.Errorf("file %s not found", literal)
    }
    if err != nil {
        return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localPath, err)
        return "", fmt.Errorf("unable to determine if %s is not a notebook: %w", localFullPath, err)
    }
    if nb {
        return "", ErrIsNotebook{localPath}
        return "", ErrIsNotebook{localFullPath}
    }
    return remotePath, nil
}

func (m *translatePaths) translateJobTask(dir string, b *bundle.Bundle, task *jobs.Task) error {
    var err error

    if task.NotebookTask != nil {
        err = m.rewritePath(dir, b, &task.NotebookTask.NotebookPath, m.translateNotebookPath)
        if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
            return fmt.Errorf(`expected a notebook for "tasks.notebook_task.notebook_path" but got a file: %w`, target)
        }
        if err != nil {
            return err
        }
    }

    if task.SparkPythonTask != nil {
        err = m.rewritePath(dir, b, &task.SparkPythonTask.PythonFile, m.translateFilePath)
        if target := (&ErrIsNotebook{}); errors.As(err, target) {
            return fmt.Errorf(`expected a file for "tasks.spark_python_task.python_file" but got a notebook: %w`, target)
        }
        if err != nil {
            return err
        }
    }

    return nil
func translateNoOp(literal, localFullPath, localRelPath, remotePath string) (string, error) {
    return localRelPath, nil
}

func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle, library *pipelines.PipelineLibrary) error {
    var err error
type transformer struct {
    // A directory path relative to which `path` will be transformed
    dir string
    // A path to transform
    path *string
    // Name of the config property where the path string is coming from
    configPath string
    // A function that performs the actual rewriting logic.
    fn rewriteFunc
}

    if library.Notebook != nil {
        err = m.rewritePath(dir, b, &library.Notebook.Path, m.translateNotebookPath)
        if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
            return fmt.Errorf(`expected a notebook for "libraries.notebook.path" but got a file: %w`, target)
        }
        if err != nil {
            return err
        }
    }
type transformFunc func(resource any, dir string) *transformer

    if library.File != nil {
        err = m.rewritePath(dir, b, &library.File.Path, m.translateFilePath)
        if target := (&ErrIsNotebook{}); errors.As(err, target) {
            return fmt.Errorf(`expected a file for "libraries.file.path" but got a notebook: %w`, target)
// Apply all matching transformers for the given resource
func (m *translatePaths) applyTransformers(funcs []transformFunc, b *bundle.Bundle, resource any, dir string) error {
    for _, transformFn := range funcs {
        transformer := transformFn(resource, dir)
        if transformer == nil {
            continue
        }

        err := m.rewritePath(transformer.dir, b, transformer.path, transformer.fn)
        if err != nil {
            if target := (&ErrIsNotebook{}); errors.As(err, target) {
                return fmt.Errorf(`expected a file for "%s" but got a notebook: %w`, transformer.configPath, target)
            }
            if target := (&ErrIsNotNotebook{}); errors.As(err, target) {
                return fmt.Errorf(`expected a notebook for "%s" but got a file: %w`, transformer.configPath, target)
            }
            return err
        }
    }

@ -179,36 +178,14 @@ func (m *translatePaths) translatePipelineLibrary(dir string, b *bundle.Bundle,
func (m *translatePaths) Apply(_ context.Context, b *bundle.Bundle) error {
    m.seen = make(map[string]string)

    for key, job := range b.Config.Resources.Jobs {
        dir, err := job.ConfigFileDirectory()
    for _, fn := range []func(*translatePaths, *bundle.Bundle) error{
        applyJobTransformers,
        applyPipelineTransformers,
        applyArtifactTransformers,
    } {
        err := fn(m, b)
        if err != nil {
            return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
        }

        // Do not translate job task paths if using git source
        if job.GitSource != nil {
            continue
        }

        for i := 0; i < len(job.Tasks); i++ {
            err := m.translateJobTask(dir, b, &job.Tasks[i])
            if err != nil {
                return err
            }
        }
    }

    for key, pipeline := range b.Config.Resources.Pipelines {
        dir, err := pipeline.ConfigFileDirectory()
        if err != nil {
            return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
        }

        for i := 0; i < len(pipeline.Libraries); i++ {
            err := m.translatePipelineLibrary(dir, b, &pipeline.Libraries[i])
            if err != nil {
                return err
            }
            return err
        }
    }

@ -0,0 +1,42 @@
package mutator

import (
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/config"
)

func transformArtifactPath(resource any, dir string) *transformer {
    artifact, ok := resource.(*config.Artifact)
    if !ok {
        return nil
    }

    return &transformer{
        dir,
        &artifact.Path,
        "artifacts.path",
        translateNoOp,
    }
}

func applyArtifactTransformers(m *translatePaths, b *bundle.Bundle) error {
    artifactTransformers := []transformFunc{
        transformArtifactPath,
    }

    for key, artifact := range b.Config.Artifacts {
        dir, err := artifact.ConfigFileDirectory()
        if err != nil {
            return fmt.Errorf("unable to determine directory for artifact %s: %w", key, err)
        }

        err = m.applyTransformers(artifactTransformers, b, artifact, dir)
        if err != nil {
            return err
        }
    }

    return nil
}
@ -0,0 +1,103 @@
package mutator

import (
    "fmt"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/databricks-sdk-go/service/compute"
    "github.com/databricks/databricks-sdk-go/service/jobs"
)

func transformNotebookTask(resource any, dir string) *transformer {
    task, ok := resource.(*jobs.Task)
    if !ok || task.NotebookTask == nil {
        return nil
    }

    return &transformer{
        dir,
        &task.NotebookTask.NotebookPath,
        "tasks.notebook_task.notebook_path",
        translateNotebookPath,
    }
}

func transformSparkTask(resource any, dir string) *transformer {
    task, ok := resource.(*jobs.Task)
    if !ok || task.SparkPythonTask == nil {
        return nil
    }

    return &transformer{
        dir,
        &task.SparkPythonTask.PythonFile,
        "tasks.spark_python_task.python_file",
        translateFilePath,
    }
}

func transformWhlLibrary(resource any, dir string) *transformer {
    library, ok := resource.(*compute.Library)
    if !ok || library.Whl == "" {
        return nil
    }

    return &transformer{
        dir,
        &library.Whl,
        "libraries.whl",
        translateNoOp,
    }
}

func transformJarLibrary(resource any, dir string) *transformer {
    library, ok := resource.(*compute.Library)
    if !ok || library.Jar == "" {
        return nil
    }

    return &transformer{
        dir,
        &library.Jar,
        "libraries.jar",
        translateFilePath,
    }
}

func applyJobTransformers(m *translatePaths, b *bundle.Bundle) error {
    jobTransformers := []transformFunc{
        transformNotebookTask,
        transformSparkTask,
        transformWhlLibrary,
        transformJarLibrary,
    }

    for key, job := range b.Config.Resources.Jobs {
        dir, err := job.ConfigFileDirectory()
        if err != nil {
            return fmt.Errorf("unable to determine directory for job %s: %w", key, err)
        }

        // Do not translate job task paths if using git source
        if job.GitSource != nil {
            continue
        }

        for i := 0; i < len(job.Tasks); i++ {
            task := &job.Tasks[i]
            err := m.applyTransformers(jobTransformers, b, task, dir)
            if err != nil {
                return err
            }
            for j := 0; j < len(task.Libraries); j++ {
                library := &task.Libraries[j]
                err := m.applyTransformers(jobTransformers, b, library, dir)
                if err != nil {
                    return err
                }
            }
        }
    }

    return nil
}
@@ -0,0 +1,60 @@
package mutator

import (
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

func transformLibraryNotebook(resource any, dir string) *transformer {
	library, ok := resource.(*pipelines.PipelineLibrary)
	if !ok || library.Notebook == nil {
		return nil
	}

	return &transformer{
		dir,
		&library.Notebook.Path,
		"libraries.notebook.path",
		translateNotebookPath,
	}
}

func transformLibraryFile(resource any, dir string) *transformer {
	library, ok := resource.(*pipelines.PipelineLibrary)
	if !ok || library.File == nil {
		return nil
	}

	return &transformer{
		dir,
		&library.File.Path,
		"libraries.file.path",
		translateFilePath,
	}
}

func applyPipelineTransformers(m *translatePaths, b *bundle.Bundle) error {
	pipelineTransformers := []transformFunc{
		transformLibraryNotebook,
		transformLibraryFile,
	}

	for key, pipeline := range b.Config.Resources.Pipelines {
		dir, err := pipeline.ConfigFileDirectory()
		if err != nil {
			return fmt.Errorf("unable to determine directory for pipeline %s: %w", key, err)
		}

		for i := 0; i < len(pipeline.Libraries); i++ {
			library := &pipeline.Libraries[i]
			err := m.applyTransformers(pipelineTransformers, b, library, dir)
			if err != nil {
				return err
			}
		}
	}

	return nil
}
@@ -9,7 +9,9 @@ import (
	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/databricks/cli/bundle/config/mutator"
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
	"github.com/stretchr/testify/assert"

@@ -43,7 +45,7 @@ func TestTranslatePathsSkippedWithGitSource(t *testing.T) {
			Jobs: map[string]*resources.Job{
				"job": {
					Paths: resources.Paths{
					Paths: paths.Paths{
						ConfigFilePath: filepath.Join(dir, "resource.yml"),
					},
					JobSettings: &jobs.JobSettings{

@@ -103,6 +105,7 @@ func TestTranslatePaths(t *testing.T) {
	touchNotebookFile(t, filepath.Join(dir, "my_job_notebook.py"))
	touchNotebookFile(t, filepath.Join(dir, "my_pipeline_notebook.py"))
	touchEmptyFile(t, filepath.Join(dir, "my_python_file.py"))
	touchEmptyFile(t, filepath.Join(dir, "dist", "task.jar"))

	bundle := &bundle.Bundle{
		Config: config.Root{

@@ -113,7 +116,7 @@ func TestTranslatePaths(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -122,6 +125,9 @@ func TestTranslatePaths(t *testing.T) {
							NotebookTask: &jobs.NotebookTask{
								NotebookPath: "./my_job_notebook.py",
							},
							Libraries: []compute.Library{
								{Whl: "./dist/task.whl"},
							},
						},
						{
							NotebookTask: &jobs.NotebookTask{

@@ -143,13 +149,29 @@ func TestTranslatePaths(t *testing.T) {
								PythonFile: "./my_python_file.py",
							},
						},
						{
							SparkJarTask: &jobs.SparkJarTask{
								MainClassName: "HelloWorld",
							},
							Libraries: []compute.Library{
								{Jar: "./dist/task.jar"},
							},
						},
						{
							SparkJarTask: &jobs.SparkJarTask{
								MainClassName: "HelloWorldRemote",
							},
							Libraries: []compute.Library{
								{Jar: "dbfs:/bundle/dist/task_remote.jar"},
							},
						},
					},
				},
			},
			Pipelines: map[string]*resources.Pipeline{
				"pipeline": {
					Paths: resources.Paths{
					Paths: paths.Paths{
						ConfigFilePath: filepath.Join(dir, "resource.yml"),
					},
					PipelineSpec: &pipelines.PipelineSpec{

@@ -194,6 +216,11 @@ func TestTranslatePaths(t *testing.T) {
		"/bundle/my_job_notebook",
		bundle.Config.Resources.Jobs["job"].Tasks[0].NotebookTask.NotebookPath,
	)
	assert.Equal(
		t,
		filepath.Join("dist", "task.whl"),
		bundle.Config.Resources.Jobs["job"].Tasks[0].Libraries[0].Whl,
	)
	assert.Equal(
		t,
		"/Users/jane.doe@databricks.com/doesnt_exist.py",

@@ -209,6 +236,16 @@ func TestTranslatePaths(t *testing.T) {
		"/bundle/my_python_file.py",
		bundle.Config.Resources.Jobs["job"].Tasks[4].SparkPythonTask.PythonFile,
	)
	assert.Equal(
		t,
		"/bundle/dist/task.jar",
		bundle.Config.Resources.Jobs["job"].Tasks[5].Libraries[0].Jar,
	)
	assert.Equal(
		t,
		"dbfs:/bundle/dist/task_remote.jar",
		bundle.Config.Resources.Jobs["job"].Tasks[6].Libraries[0].Jar,
	)

	// Assert that the path in the libraries now refers to the artifact.
	assert.Equal(

@@ -236,6 +273,7 @@ func TestTranslatePaths(t *testing.T) {
func TestTranslatePathsInSubdirectories(t *testing.T) {
	dir := t.TempDir()
	touchEmptyFile(t, filepath.Join(dir, "job", "my_python_file.py"))
	touchEmptyFile(t, filepath.Join(dir, "job", "dist", "task.jar"))
	touchEmptyFile(t, filepath.Join(dir, "pipeline", "my_python_file.py"))

	bundle := &bundle.Bundle{

@@ -247,7 +285,7 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "job/resource.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -257,13 +295,21 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
								PythonFile: "./my_python_file.py",
							},
						},
						{
							SparkJarTask: &jobs.SparkJarTask{
								MainClassName: "HelloWorld",
							},
							Libraries: []compute.Library{
								{Jar: "./dist/task.jar"},
							},
						},
					},
				},
			},
			Pipelines: map[string]*resources.Pipeline{
				"pipeline": {
					Paths: resources.Paths{
					Paths: paths.Paths{
						ConfigFilePath: filepath.Join(dir, "pipeline/resource.yml"),
					},

@@ -290,6 +336,11 @@ func TestTranslatePathsInSubdirectories(t *testing.T) {
		"/bundle/job/my_python_file.py",
		bundle.Config.Resources.Jobs["job"].Tasks[0].SparkPythonTask.PythonFile,
	)
	assert.Equal(
		t,
		"/bundle/job/dist/task.jar",
		bundle.Config.Resources.Jobs["job"].Tasks[1].Libraries[0].Jar,
	)

	assert.Equal(
		t,

@@ -310,7 +361,7 @@ func TestTranslatePathsOutsideBundleRoot(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "../resource.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -341,7 +392,7 @@ func TestJobNotebookDoesNotExistError(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "fake.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -372,7 +423,7 @@ func TestJobFileDoesNotExistError(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "fake.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -403,7 +454,7 @@ func TestPipelineNotebookDoesNotExistError(t *testing.T) {
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{
					"pipeline": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "fake.yml"),
						},
						PipelineSpec: &pipelines.PipelineSpec{

@@ -434,7 +485,7 @@ func TestPipelineFileDoesNotExistError(t *testing.T) {
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{
					"pipeline": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "fake.yml"),
						},
						PipelineSpec: &pipelines.PipelineSpec{

@@ -469,7 +520,7 @@ func TestJobSparkPythonTaskWithNotebookSourceError(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -504,7 +555,7 @@ func TestJobNotebookTaskWithFileSourceError(t *testing.T) {
			Resources: config.Resources{
				Jobs: map[string]*resources.Job{
					"job": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						JobSettings: &jobs.JobSettings{

@@ -539,7 +590,7 @@ func TestPipelineNotebookLibraryWithFileSourceError(t *testing.T) {
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{
					"pipeline": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						PipelineSpec: &pipelines.PipelineSpec{

@@ -574,7 +625,7 @@ func TestPipelineFileLibraryWithNotebookSourceError(t *testing.T) {
			Resources: config.Resources{
				Pipelines: map[string]*resources.Pipeline{
					"pipeline": {
						Paths: resources.Paths{
						Paths: paths.Paths{
							ConfigFilePath: filepath.Join(dir, "resource.yml"),
						},
						PipelineSpec: &pipelines.PipelineSpec{
@@ -0,0 +1,29 @@
package mutator

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
)

type validateGitDetails struct{}

func ValidateGitDetails() *validateGitDetails {
	return &validateGitDetails{}
}

func (m *validateGitDetails) Name() string {
	return "ValidateGitDetails"
}

func (m *validateGitDetails) Apply(ctx context.Context, b *bundle.Bundle) error {
	if b.Config.Bundle.Git.Branch == "" || b.Config.Bundle.Git.ActualBranch == "" {
		return nil
	}

	if b.Config.Bundle.Git.Branch != b.Config.Bundle.Git.ActualBranch && !b.Config.Bundle.Force {
		return fmt.Errorf("not on the right Git branch:\n expected according to configuration: %s\n actual: %s\nuse --force to override", b.Config.Bundle.Git.Branch, b.Config.Bundle.Git.ActualBranch)
	}
	return nil
}
@@ -0,0 +1,65 @@
package mutator

import (
	"context"
	"testing"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/config"
	"github.com/stretchr/testify/assert"
)

func TestValidateGitDetailsMatchingBranches(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Git: config.Git{
					Branch:       "main",
					ActualBranch: "main",
				},
			},
		},
	}

	m := ValidateGitDetails()
	err := m.Apply(context.Background(), bundle)

	assert.NoError(t, err)
}

func TestValidateGitDetailsNonMatchingBranches(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Git: config.Git{
					Branch:       "main",
					ActualBranch: "feature",
				},
			},
		},
	}

	m := ValidateGitDetails()
	err := m.Apply(context.Background(), bundle)

	expectedError := "not on the right Git branch:\n expected according to configuration: main\n actual: feature\nuse --force to override"
	assert.EqualError(t, err, expectedError)
}

func TestValidateGitDetailsNotUsingGit(t *testing.T) {
	bundle := &bundle.Bundle{
		Config: config.Root{
			Bundle: config.Bundle{
				Git: config.Git{
					Branch:       "main",
					ActualBranch: "",
				},
			},
		},
	}

	m := ValidateGitDetails()
	err := m.Apply(context.Background(), bundle)

	assert.NoError(t, err)
}
@@ -1,4 +1,4 @@
package resources
package paths

import (
	"fmt"
@@ -11,8 +11,9 @@ type Resources struct {
	Jobs      map[string]*resources.Job      `json:"jobs,omitempty"`
	Pipelines map[string]*resources.Pipeline `json:"pipelines,omitempty"`

	Models      map[string]*resources.MlflowModel      `json:"models,omitempty"`
	Experiments map[string]*resources.MlflowExperiment `json:"experiments,omitempty"`
	Models                map[string]*resources.MlflowModel          `json:"models,omitempty"`
	Experiments           map[string]*resources.MlflowExperiment     `json:"experiments,omitempty"`
	ModelServingEndpoints map[string]*resources.ModelServingEndpoint `json:"model_serving_endpoints,omitempty"`
}

type UniqueResourceIdTracker struct {

@@ -93,6 +94,19 @@ func (r *Resources) VerifyUniqueResourceIdentifiers() (*UniqueResourceIdTracker,
		tracker.Type[k] = "mlflow_experiment"
		tracker.ConfigPath[k] = r.Experiments[k].ConfigFilePath
	}
	for k := range r.ModelServingEndpoints {
		if _, ok := tracker.Type[k]; ok {
			return tracker, fmt.Errorf("multiple resources named %s (%s at %s, %s at %s)",
				k,
				tracker.Type[k],
				tracker.ConfigPath[k],
				"model_serving_endpoint",
				r.ModelServingEndpoints[k].ConfigFilePath,
			)
		}
		tracker.Type[k] = "model_serving_endpoint"
		tracker.ConfigPath[k] = r.ModelServingEndpoints[k].ConfigFilePath
	}
	return tracker, nil
}

@@ -112,4 +126,18 @@ func (r *Resources) SetConfigFilePath(path string) {
	for _, e := range r.Experiments {
		e.ConfigFilePath = path
	}
	for _, e := range r.ModelServingEndpoints {
		e.ConfigFilePath = path
	}
}

// MergeJobClusters iterates over all jobs and merges their job clusters.
// This is called after applying the target overrides.
func (r *Resources) MergeJobClusters() error {
	for _, job := range r.Jobs {
		if err := job.MergeJobClusters(); err != nil {
			return err
		}
	}
	return nil
}
@@ -1,12 +1,49 @@
package resources

import "github.com/databricks/databricks-sdk-go/service/jobs"
import (
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/imdario/mergo"
)

type Job struct {
	ID          string       `json:"id,omitempty" bundle:"readonly"`
	Permissions []Permission `json:"permissions,omitempty"`

	Paths
	paths.Paths

	*jobs.JobSettings
}

// MergeJobClusters merges job clusters with the same key.
// The job clusters field is a slice, and as such, overrides are appended to it.
// We can identify a job cluster by its key, however, so we can use this key
// to figure out which definitions are actually overrides and merge them.
func (j *Job) MergeJobClusters() error {
	keys := make(map[string]*jobs.JobCluster)
	output := make([]jobs.JobCluster, 0, len(j.JobClusters))

	// Target overrides are always appended, so we can iterate in natural order to
	// first find the base definition, and merge instances we encounter later.
	for i := range j.JobClusters {
		key := j.JobClusters[i].JobClusterKey

		// Register job cluster with key if not yet seen before.
		// The pointer must target the entry in the output slice so that
		// later merges are reflected in the final result.
		ref, ok := keys[key]
		if !ok {
			output = append(output, j.JobClusters[i])
			keys[key] = &output[len(output)-1]
			continue
		}

		// Merge this instance into the reference.
		err := mergo.Merge(ref, &j.JobClusters[i], mergo.WithOverride, mergo.WithAppendSlice)
		if err != nil {
			return err
		}
	}

	// Overwrite resulting slice.
	j.JobClusters = output
	return nil
}
@@ -0,0 +1,57 @@
package resources

import (
	"testing"

	"github.com/databricks/databricks-sdk-go/service/compute"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestJobMergeJobClusters(t *testing.T) {
	j := &Job{
		JobSettings: &jobs.JobSettings{
			JobClusters: []jobs.JobCluster{
				{
					JobClusterKey: "foo",
					NewCluster: &compute.ClusterSpec{
						SparkVersion: "13.3.x-scala2.12",
						NodeTypeId:   "i3.xlarge",
						NumWorkers:   2,
					},
				},
				{
					JobClusterKey: "bar",
					NewCluster: &compute.ClusterSpec{
						SparkVersion: "10.4.x-scala2.12",
					},
				},
				{
					JobClusterKey: "foo",
					NewCluster: &compute.ClusterSpec{
						NodeTypeId: "i3.2xlarge",
						NumWorkers: 4,
					},
				},
			},
		},
	}

	err := j.MergeJobClusters()
	require.NoError(t, err)

	assert.Len(t, j.JobClusters, 2)
	assert.Equal(t, "foo", j.JobClusters[0].JobClusterKey)
	assert.Equal(t, "bar", j.JobClusters[1].JobClusterKey)

	// This job cluster was merged with a subsequent one.
	jc0 := j.JobClusters[0].NewCluster
	assert.Equal(t, "13.3.x-scala2.12", jc0.SparkVersion)
	assert.Equal(t, "i3.2xlarge", jc0.NodeTypeId)
	assert.Equal(t, 4, jc0.NumWorkers)

	// This job cluster was left untouched.
	jc1 := j.JobClusters[1].NewCluster
	assert.Equal(t, "10.4.x-scala2.12", jc1.SparkVersion)
}
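The merge behavior this test exercises comes straight from the mergo options used above: `WithOverride` lets non-zero fields of the later definition replace the base, and `WithAppendSlice` concatenates slices instead of replacing them. A self-contained illustration (the struct and values below are made up):

```go
package main

import (
	"fmt"

	"github.com/imdario/mergo"
)

type clusterSpec struct {
	SparkVersion string
	NodeTypeId   string
	Tags         []string
}

func main() {
	base := clusterSpec{SparkVersion: "13.3.x-scala2.12", NodeTypeId: "i3.xlarge", Tags: []string{"base"}}
	override := clusterSpec{NodeTypeId: "i3.2xlarge", Tags: []string{"override"}}

	// Non-zero fields in override win; slices are appended rather than replaced.
	if err := mergo.Merge(&base, override, mergo.WithOverride, mergo.WithAppendSlice); err != nil {
		panic(err)
	}

	fmt.Println(base.SparkVersion) // 13.3.x-scala2.12 (override left it empty)
	fmt.Println(base.NodeTypeId)   // i3.2xlarge
	fmt.Println(base.Tags)         // [base override]
}
```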
@@ -1,11 +1,14 @@
package resources

import "github.com/databricks/databricks-sdk-go/service/ml"
import (
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/ml"
)

type MlflowExperiment struct {
	Permissions []Permission `json:"permissions,omitempty"`

	Paths
	paths.Paths

	*ml.Experiment
}
@@ -1,11 +1,14 @@
package resources

import "github.com/databricks/databricks-sdk-go/service/ml"
import (
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/ml"
)

type MlflowModel struct {
	Permissions []Permission `json:"permissions,omitempty"`

	Paths
	paths.Paths

	*ml.Model
}
@@ -0,0 +1,24 @@
package resources

import (
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

type ModelServingEndpoint struct {
	// This represents the input args for terraform, and will get converted
	// to a HCL representation for CRUD
	*serving.CreateServingEndpoint

	// This represents the id (i.e. serving_endpoint_id) that can be used
	// as a reference in other resources. This value is returned by terraform.
	ID string

	// Local path where the bundle is defined. All bundle resources include
	// this for interpolation purposes.
	paths.Paths

	// This is a resource-agnostic implementation of permissions for ACLs.
	// Implementation could be different based on the resource type.
	Permissions []Permission `json:"permissions,omitempty"`
}
@@ -1,12 +1,15 @@
package resources

import "github.com/databricks/databricks-sdk-go/service/pipelines"
import (
	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/databricks-sdk-go/service/pipelines"
)

type Pipeline struct {
	ID          string       `json:"id,omitempty" bundle:"readonly"`
	Permissions []Permission `json:"permissions,omitempty"`

	Paths
	paths.Paths

	*pipelines.PipelineSpec
}
@@ -3,6 +3,7 @@ package config
import (
	"testing"

	"github.com/databricks/cli/bundle/config/paths"
	"github.com/databricks/cli/bundle/config/resources"
	"github.com/stretchr/testify/assert"
)

@@ -11,21 +12,21 @@ func TestVerifyUniqueResourceIdentifiers(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},
		},
		Experiments: map[string]*resources.MlflowExperiment{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},

@@ -39,14 +40,14 @@ func TestVerifySafeMerge(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},

@@ -55,7 +56,7 @@ func TestVerifySafeMerge(t *testing.T) {
	other := Resources{
		Pipelines: map[string]*resources.Pipeline{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},

@@ -69,14 +70,14 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) {
	r := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo.yml",
				},
			},
		},
		Models: map[string]*resources.MlflowModel{
			"bar": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "bar.yml",
				},
			},

@@ -85,7 +86,7 @@ func TestVerifySafeMergeForSameResourceType(t *testing.T) {
	other := Resources{
		Jobs: map[string]*resources.Job{
			"foo": {
				Paths: resources.Paths{
				Paths: paths.Paths{
					ConfigFilePath: "foo2.yml",
				},
			},
@@ -7,16 +7,44 @@ import (
	"strings"

	"github.com/databricks/cli/bundle/config/variable"
	"github.com/databricks/databricks-sdk-go/service/jobs"
	"github.com/ghodss/yaml"
	"github.com/imdario/mergo"
)

// FileName is the name of bundle configuration file.
const FileName = "bundle.yml"
type ConfigFileNames []string

// FileNames contains allowed names of bundle configuration files.
var FileNames = ConfigFileNames{"databricks.yml", "databricks.yaml", "bundle.yml", "bundle.yaml"}

func (c ConfigFileNames) FindInPath(path string) (string, error) {
	result := ""
	var firstErr error

	for _, file := range c {
		filePath := filepath.Join(path, file)
		_, err := os.Stat(filePath)
		if err == nil {
			if result != "" {
				return "", fmt.Errorf("multiple bundle root configuration files found in %s", path)
			}
			result = filePath
		} else {
			if firstErr == nil {
				firstErr = err
			}
		}
	}

	if result == "" {
		return "", firstErr
	}
	return result, nil
}
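A quick usage sketch of the lookup above (the directory value is made up; the import path matches the package paths used elsewhere in this diff):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	// Resolve the bundle root configuration file in the current directory.
	// Errors if none of the four candidate names exist, or if more than one does.
	path, err := config.FileNames.FindInPath(".")
	if err != nil {
		fmt.Println("no single bundle configuration found:", err)
		return
	}
	fmt.Println("loading", path)
}
```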
type Root struct {
	// Path contains the directory path to the root of the bundle.
	// It is set when loading `bundle.yml`.
	// It is set when loading `databricks.yml`.
	Path string `json:"-" bundle:"readonly"`

	// Contains user defined variables

@@ -24,14 +52,11 @@ type Root struct {

	// Bundle contains details about this bundle, such as its name,
	// version of the spec (TODO), default cluster, default warehouse, etc.
	Bundle Bundle `json:"bundle"`
	Bundle Bundle `json:"bundle,omitempty"`

	// Include specifies a list of patterns of file names to load and
	// merge into this configuration. If not set in `bundle.yml`,
	// it defaults to loading `*.yml` and `*/*.yml`.
	//
	// Also see [mutator.DefineDefaultInclude].
	//
	// merge into this configuration. Only includes defined in the root
	// `databricks.yml` are processed. Defaults to an empty list.
	Include []string `json:"include,omitempty"`

	// Workspace contains details about the workspace to connect to

@@ -39,17 +64,28 @@ type Root struct {
	Workspace Workspace `json:"workspace,omitempty"`

	// Artifacts contains a description of all code artifacts in this bundle.
	Artifacts map[string]*Artifact `json:"artifacts,omitempty"`
	Artifacts Artifacts `json:"artifacts,omitempty"`

	// Resources contains a description of all Databricks resources
	// to deploy in this bundle (e.g. jobs, pipelines, etc.).
	Resources Resources `json:"resources,omitempty"`

	// Environments can be used to differentiate settings and resources between
	// bundle deployment environments (e.g. development, staging, production).
	// Targets can be used to differentiate settings and resources between
	// bundle deployment targets (e.g. development, staging, production).
	// If not specified, the code below initializes this field with a
	// single default-initialized environment called "default".
	Environments map[string]*Environment `json:"environments,omitempty"`
	// single default-initialized target called "default".
	Targets map[string]*Target `json:"targets,omitempty"`

	// DEPRECATED. Left for backward compatibility with Targets
	Environments map[string]*Target `json:"environments,omitempty"`

	// Sync section specifies options for file synchronization
	Sync Sync `json:"sync,omitempty"`

	// RunAs section allows defining an execution identity for job and pipeline runs
	RunAs *jobs.JobRunAs `json:"run_as,omitempty"`

	Experimental *Experimental `json:"experimental,omitempty"`
}

func Load(path string) (*Root, error) {

@@ -62,7 +98,10 @@ func Load(path string) (*Root, error) {

	// If we were given a directory, assume this is the bundle root.
	if stat.IsDir() {
		path = filepath.Join(path, FileName)
		path, err = FileNames.FindInPath(path)
		if err != nil {
			return nil, err
		}
	}

	if err := r.Load(path); err != nil {

@@ -76,14 +115,21 @@ func Load(path string) (*Root, error) {
// was loaded from in configuration leafs that require it.
func (r *Root) SetConfigFilePath(path string) {
	r.Resources.SetConfigFilePath(path)
	if r.Environments != nil {
		for _, env := range r.Environments {
	if r.Artifacts != nil {
		r.Artifacts.SetConfigFilePath(path)
	}

	if r.Targets != nil {
		for _, env := range r.Targets {
			if env == nil {
				continue
			}
			if env.Resources != nil {
				env.Resources.SetConfigFilePath(path)
			}
			if env.Artifacts != nil {
				env.Artifacts.SetConfigFilePath(path)
			}
		}
	}
}

@@ -121,6 +167,15 @@ func (r *Root) Load(path string) error {
		return fmt.Errorf("failed to load %s: %w", path, err)
	}

	if r.Environments != nil && r.Targets != nil {
		return fmt.Errorf("both 'environments' and 'targets' are specified, only 'targets' should be used: %s", path)
	}

	if r.Environments != nil {
		// TODO: add a command line notice that this is a deprecated option.
		r.Targets = r.Environments
	}

	r.Path = filepath.Dir(path)
	r.SetConfigFilePath(path)

@@ -129,57 +184,68 @@ func (r *Root) Load(path string) error {
}

func (r *Root) Merge(other *Root) error {
	err := r.Sync.Merge(r, other)
	if err != nil {
		return err
	}
	other.Sync = Sync{}

	// TODO: when hooking into merge semantics, disallow setting path on the target instance.
	other.Path = ""

	// Check for safe merge, protecting against duplicate resource identifiers
	err := r.Resources.VerifySafeMerge(&other.Resources)
	err = r.Resources.VerifySafeMerge(&other.Resources)
	if err != nil {
		return err
	}

	// TODO: define and test semantics for merging.
	return mergo.MergeWithOverwrite(r, other)
	return mergo.Merge(r, other, mergo.WithOverride)
}

func (r *Root) MergeEnvironment(env *Environment) error {
func (r *Root) MergeTargetOverrides(target *Target) error {
	var err error

	// Environment may be nil if it's empty.
	if env == nil {
	// Target may be nil if it's empty.
	if target == nil {
		return nil
	}

	if env.Bundle != nil {
		err = mergo.MergeWithOverwrite(&r.Bundle, env.Bundle)
	if target.Bundle != nil {
		err = mergo.Merge(&r.Bundle, target.Bundle, mergo.WithOverride)
		if err != nil {
			return err
		}
	}

	if env.Workspace != nil {
		err = mergo.MergeWithOverwrite(&r.Workspace, env.Workspace)
	if target.Workspace != nil {
		err = mergo.Merge(&r.Workspace, target.Workspace, mergo.WithOverride)
		if err != nil {
			return err
		}
	}

	if env.Artifacts != nil {
		err = mergo.Merge(&r.Artifacts, env.Artifacts, mergo.WithAppendSlice)
	if target.Artifacts != nil {
		err = mergo.Merge(&r.Artifacts, target.Artifacts, mergo.WithOverride, mergo.WithAppendSlice)
		if err != nil {
			return err
		}
	}

	if env.Resources != nil {
		err = mergo.Merge(&r.Resources, env.Resources, mergo.WithAppendSlice)
	if target.Resources != nil {
		err = mergo.Merge(&r.Resources, target.Resources, mergo.WithOverride, mergo.WithAppendSlice)
		if err != nil {
			return err
		}

		err = r.Resources.MergeJobClusters()
		if err != nil {
			return err
		}
	}

	if env.Variables != nil {
		for k, v := range env.Variables {
	if target.Variables != nil {
		for k, v := range target.Variables {
			variable, ok := r.Variables[k]
			if !ok {
				return fmt.Errorf("variable %s is not defined but is assigned a value", k)

@@ -190,12 +256,28 @@ func (r *Root) MergeEnvironment(env *Environment) error {
		}
	}

	if env.Mode != "" {
		r.Bundle.Mode = env.Mode
	if target.RunAs != nil {
		r.RunAs = target.RunAs
	}

	if env.ComputeID != "" {
		r.Bundle.ComputeID = env.ComputeID
	if target.Mode != "" {
		r.Bundle.Mode = target.Mode
	}

	if target.ComputeID != "" {
		r.Bundle.ComputeID = target.ComputeID
	}

	git := &r.Bundle.Git
	if target.Git.Branch != "" {
		git.Branch = target.Git.Branch
		git.Inferred = false
	}
	if target.Git.Commit != "" {
		git.Commit = target.Git.Commit
	}
	if target.Git.OriginURL != "" {
		git.OriginURL = target.Git.OriginURL
	}

	return nil
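The net precedence established above: section structs (`Bundle`, `Workspace`, `Artifacts`, `Resources`) are merged with override-and-append semantics, while scalar fields like `Mode` and `ComputeID` and the individual `Git` fields are copied over only when set on the target. A short sketch of the effect, using made-up values:

```go
package main

import "github.com/databricks/cli/bundle/config"

func main() {
	root := &config.Root{}
	target := &config.Target{
		Mode:      config.Development,
		ComputeID: "1234-567890-abcde123", // made-up cluster ID
	}

	// Scalar overrides from the target land directly on the root bundle.
	_ = root.MergeTargetOverrides(target)
	// root.Bundle.Mode == config.Development
	// root.Bundle.ComputeID == "1234-567890-abcde123"
}
```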
@@ -2,7 +2,11 @@ package config

import (
	"encoding/json"
	"os"
	"path/filepath"
	"reflect"
	"runtime"
	"strings"
	"testing"

	"github.com/databricks/cli/bundle/config/variable"

@@ -26,7 +30,7 @@ func TestRootMarshalUnmarshal(t *testing.T) {

func TestRootLoad(t *testing.T) {
	root := &Root{}
	err := root.Load("../tests/basic/bundle.yml")
	err := root.Load("../tests/basic/databricks.yml")
	require.NoError(t, err)
	assert.Equal(t, "basic", root.Bundle.Name)
}

@@ -53,7 +57,7 @@ func TestRootMergeStruct(t *testing.T) {
func TestRootMergeMap(t *testing.T) {
	root := &Root{
		Path: "path",
		Environments: map[string]*Environment{
		Targets: map[string]*Target{
			"development": {
				Workspace: &Workspace{
					Host: "foo",

@@ -64,7 +68,7 @@ func TestRootMergeMap(t *testing.T) {
	}
	other := &Root{
		Path: "path",
		Environments: map[string]*Environment{
		Targets: map[string]*Target{
			"development": {
				Workspace: &Workspace{
					Host: "bar",

@@ -73,18 +77,18 @@ func TestRootMergeMap(t *testing.T) {
		},
	}
	assert.NoError(t, root.Merge(other))
	assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Environments["development"].Workspace)
	assert.Equal(t, &Workspace{Host: "bar", Profile: "profile"}, root.Targets["development"].Workspace)
}

func TestDuplicateIdOnLoadReturnsError(t *testing.T) {
	root := &Root{}
	err := root.Load("./testdata/duplicate_resource_names_in_root/bundle.yml")
	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/bundle.yml, pipeline at ./testdata/duplicate_resource_names_in_root/bundle.yml)")
	err := root.Load("./testdata/duplicate_resource_names_in_root/databricks.yml")
	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_names_in_root/databricks.yml, pipeline at ./testdata/duplicate_resource_names_in_root/databricks.yml)")
}

func TestDuplicateIdOnMergeReturnsError(t *testing.T) {
	root := &Root{}
	err := root.Load("./testdata/duplicate_resource_name_in_subconfiguration/bundle.yml")
	err := root.Load("./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml")
	require.NoError(t, err)

	other := &Root{}

@@ -92,7 +96,7 @@ func TestDuplicateIdOnMergeReturnsError(t *testing.T) {
	require.NoError(t, err)

	err = root.Merge(other)
	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/bundle.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)")
	assert.ErrorContains(t, err, "multiple resources named foo (job at ./testdata/duplicate_resource_name_in_subconfiguration/databricks.yml, pipeline at ./testdata/duplicate_resource_name_in_subconfiguration/resources.yml)")
}

func TestInitializeVariables(t *testing.T) {

@@ -155,11 +159,70 @@ func TestInitializeVariablesUndefinedVariables(t *testing.T) {
	assert.ErrorContains(t, err, "variable bar has not been defined")
}

func TestRootMergeEnvironmentWithMode(t *testing.T) {
func TestRootMergeTargetOverridesWithMode(t *testing.T) {
	root := &Root{
		Bundle: Bundle{},
	}
	env := &Environment{Mode: Development}
	require.NoError(t, root.MergeEnvironment(env))
	env := &Target{Mode: Development}
	require.NoError(t, root.MergeTargetOverrides(env))
	assert.Equal(t, Development, root.Bundle.Mode)
}

func TestConfigFileNames_FindInPath(t *testing.T) {
	testCases := []struct {
		name     string
		files    []string
		expected string
		err      string
	}{
		{
			name:     "file found",
			files:    []string{"databricks.yml"},
			expected: "BASE/databricks.yml",
			err:      "",
		},
		{
			name:     "file found",
			files:    []string{"bundle.yml"},
			expected: "BASE/bundle.yml",
			err:      "",
		},
		{
			name:     "multiple files found",
			files:    []string{"databricks.yaml", "bundle.yml"},
			expected: "",
			err:      "multiple bundle root configuration files found",
		},
		{
			name:     "file not found",
			files:    []string{},
			expected: "",
			err:      "no such file or directory",
		},
	}

	if runtime.GOOS == "windows" {
		testCases[3].err = "The system cannot find the file specified."
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			projectDir := t.TempDir()
			for _, file := range tc.files {
				f1, _ := os.Create(filepath.Join(projectDir, file))
				f1.Close()
			}

			result, err := FileNames.FindInPath(projectDir)

			expected := strings.Replace(tc.expected, "BASE/", projectDir+string(os.PathSeparator), 1)
			assert.Equal(t, expected, result)

			if tc.err != "" {
				assert.ErrorContains(t, err, tc.err)
			} else {
				assert.NoError(t, err)
			}
		})
	}
}
@@ -0,0 +1,31 @@
package config

import "path/filepath"

type Sync struct {
	// Include contains a list of globs evaluated relative to the bundle root path
	// to explicitly include files that were excluded by the user's gitignore.
	Include []string `json:"include,omitempty"`

	// Exclude contains a list of globs evaluated relative to the bundle root path
	// to explicitly exclude files that were included by
	// 1) the default that observes the user's gitignore, or
	// 2) the `Include` field above.
	Exclude []string `json:"exclude,omitempty"`
}

func (s *Sync) Merge(root *Root, other *Root) error {
	path, err := filepath.Rel(root.Path, other.Path)
	if err != nil {
		return err
	}
	for _, include := range other.Sync.Include {
		s.Include = append(s.Include, filepath.Join(path, include))
	}

	for _, exclude := range other.Sync.Exclude {
		s.Exclude = append(s.Exclude, filepath.Join(path, exclude))
	}

	return nil
}
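The interesting part of `Merge` is the rebase: globs from an included sub-configuration are re-rooted relative to the top-level bundle before being appended. A small sketch with made-up paths:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	root := &config.Root{Path: "/bundle"}
	other := &config.Root{
		Path: "/bundle/subdir",
		Sync: config.Sync{Include: []string{"*.py"}},
	}

	// "*.py" declared in /bundle/subdir becomes "subdir/*.py" relative to /bundle.
	_ = root.Sync.Merge(root, other)
	fmt.Println(root.Sync.Include) // [subdir/*.py]
}
```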
@@ -0,0 +1,50 @@
package config

import "github.com/databricks/databricks-sdk-go/service/jobs"

type Mode string

// Target defines overrides for a single target.
// This structure is recursively merged into the root configuration.
type Target struct {
	// Default marks that this target must be used if one isn't specified
	// by the user (through target variable or command line argument).
	Default bool `json:"default,omitempty"`

	// Determines the mode of the target.
	// For example, 'mode: development' can be used for deployments for
	// development purposes.
	Mode Mode `json:"mode,omitempty"`

	// Overrides the compute used for jobs and other supported assets.
	ComputeID string `json:"compute_id,omitempty"`

	Bundle *Bundle `json:"bundle,omitempty"`

	Workspace *Workspace `json:"workspace,omitempty"`

	Artifacts Artifacts `json:"artifacts,omitempty"`

	Resources *Resources `json:"resources,omitempty"`

	// Override default values for defined variables.
	// Does not permit defining new variables or redefining existing ones
	// in the scope of a target.
	Variables map[string]string `json:"variables,omitempty"`

	Git Git `json:"git,omitempty"`

	RunAs *jobs.JobRunAs `json:"run_as,omitempty"`
}

const (
	// Development mode: deployments done purely for running things in development.
	// Any deployed resources will be marked as "dev" and might be hidden or cleaned up.
	Development Mode = "development"

	// Production mode: deployments done for production purposes.
	// Any deployed resources will not be changed but this mode will enable
	// various strictness checks to make sure that a deployment is correctly set up
	// for production purposes.
	Production Mode = "production"
)
@@ -18,7 +18,7 @@ type Variable struct {
	// resolved in the following priority order (from highest to lowest)
	//
	// 1. Command line flag. For example: `--var="foo=bar"`
	// 2. Environment variable. eg: BUNDLE_VAR_foo=bar
	// 2. Target variable. eg: BUNDLE_VAR_foo=bar
	// 3. Default value as defined in the applicable environments block
	// 4. Default value defined in variable definition
	// 5. Throw error, since if no default value is defined, then the variable
@@ -21,8 +21,13 @@ type Workspace struct {
	//

	// Generic attributes.
	Host    string `json:"host,omitempty"`
	Profile string `json:"profile,omitempty"`
	Host               string `json:"host,omitempty"`
	Profile            string `json:"profile,omitempty"`
	AuthType           string `json:"auth_type,omitempty"`
	MetadataServiceURL string `json:"metadata_service_url,omitempty" bundle:"internal"`

	// OAuth specific attributes.
	ClientID string `json:"client_id,omitempty"`

	// Google specific attributes.
	GoogleServiceAccount string `json:"google_service_account,omitempty"`

@@ -37,10 +42,10 @@ type Workspace struct {

	// CurrentUser holds the current user.
	// This is set after configuration initialization.
	CurrentUser *iam.User `json:"current_user,omitempty" bundle:"readonly"`
	CurrentUser *User `json:"current_user,omitempty" bundle:"readonly"`

	// Remote workspace base path for deployment state, for artifacts, as synchronization target.
	// This defaults to "~/.bundle/${bundle.name}/${bundle.environment}" where "~" expands to
	// This defaults to "~/.bundle/${bundle.name}/${bundle.target}" where "~" expands to
	// the current user's home directory in the workspace (e.g. `/Users/jane@doe.com`).
	RootPath string `json:"root_path,omitempty"`

@@ -57,11 +62,23 @@ type Workspace struct {
	StatePath string `json:"state_path,omitempty"`
}

type User struct {
	// A short name for the user, based on the user's UserName.
	ShortName string `json:"short_name,omitempty" bundle:"readonly"`

	*iam.User
}

func (w *Workspace) Client() (*databricks.WorkspaceClient, error) {
	cfg := databricks.Config{
		// Generic
		Host:    w.Host,
		Profile: w.Profile,
		Host:               w.Host,
		Profile:            w.Profile,
		AuthType:           w.AuthType,
		MetadataServiceURL: w.MetadataServiceURL,

		// OAuth
		ClientID: w.ClientID,

		// Google
		GoogleServiceAccount: w.GoogleServiceAccount,
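With the new generic attributes, SDK authentication can be pinned entirely from bundle configuration. A minimal sketch (the host value is made up, and "pat" is assumed to be a valid `auth_type` value in the Databricks SDK):

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/config"
)

func main() {
	w := &config.Workspace{
		Host:     "https://example.cloud.databricks.com", // made-up workspace URL
		AuthType: "pat",                                  // force a specific auth method
	}

	// Client() maps these fields onto databricks.Config as shown in the hunk above.
	client, err := w.Client()
	if err != nil {
		fmt.Println("failed to resolve workspace client:", err)
		return
	}
	_ = client
}
```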
@@ -27,7 +27,7 @@ func (m *delete) Apply(ctx context.Context, b *bundle.Bundle) error {

	red := color.New(color.FgRed).SprintFunc()
	if !b.AutoApprove {
		proceed, err := cmdio.Ask(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?: ", b.Config.Workspace.RootPath, red("deleted permanently!")))
		proceed, err := cmdio.AskYesOrNo(ctx, fmt.Sprintf("\n%s and all files in it will be %s Proceed?", b.Config.Workspace.RootPath, red("deleted permanently!")))
		if err != nil {
			return err
		}