mirror of https://github.com/databricks/cli.git
Compare commits
5 commits: bb58bd1341 ... 039193339f

039193339f
67557b7c84
ac80d3dfcb
7665c639bd
6c57683dc6
@@ -67,7 +67,12 @@ func rewriteComputeIdToClusterId(v dyn.Value, p dyn.Path) (dyn.Value, diag.Diagn
 	// Drop the "compute_id" key.
 	vout, err := dyn.Walk(nv, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
 		switch len(p) {
-		case 0, 1:
+		case 0:
+			return v, nil
+		case 1:
 			if p[0] == dyn.Key("compute_id") {
 				return v, dyn.ErrDrop
 			}
+			return v, nil
+		case 2:
+			if p[1] == dyn.Key("compute_id") {
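For readers unfamiliar with the libs/dyn walker used above: returning dyn.ErrDrop from the visit callback removes the value at the current path. A minimal standalone sketch, assuming dyn.V constructs a dyn.Value from a Go map the way the library's tests do:

package main

import (
	"fmt"

	"github.com/databricks/cli/libs/dyn"
)

func main() {
	// A mapping with two keys; the walk drops "compute_id" and keeps the rest.
	v := dyn.V(map[string]dyn.Value{
		"compute_id": dyn.V("legacy-id"),
		"cluster_id": dyn.V("cluster-123"),
	})

	out, err := dyn.Walk(v, func(p dyn.Path, v dyn.Value) (dyn.Value, error) {
		// A path of length 1 addresses a top-level key; dyn.ErrDrop removes it.
		if len(p) == 1 && p[0] == dyn.Key("compute_id") {
			return v, dyn.ErrDrop
		}
		return v, nil
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(out)
}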
@@ -41,8 +41,13 @@ func TestComputeIdToClusterIdInTargetOverride(t *testing.T) {
 		},
 	}
 
-	diags := bundle.Apply(context.Background(), b, bundle.Seq(mutator.ComputeIdToClusterId(), mutator.SelectTarget("dev")))
+	diags := bundle.Apply(context.Background(), b, mutator.ComputeIdToClusterId())
 	assert.NoError(t, diags.Error())
+	assert.Empty(t, b.Config.Targets["dev"].ComputeId)
+
+	diags = diags.Extend(bundle.Apply(context.Background(), b, mutator.SelectTarget("dev")))
+	assert.NoError(t, diags.Error())
 
 	assert.Equal(t, "compute-id-dev", b.Config.Bundle.ClusterId)
 	assert.Empty(t, b.Config.Bundle.ComputeId)
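The updated test exercises the two mutators separately: ComputeIdToClusterId alone must clear compute_id on the "dev" target, and only then is SelectTarget("dev") applied and the promoted cluster_id checked on the bundle.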
@@ -8,9 +8,12 @@ import (
 	"github.com/databricks/cli/libs/cmdio"
 	"github.com/databricks/cli/libs/diag"
 	"github.com/databricks/cli/libs/log"
+	"github.com/databricks/cli/libs/sync"
 )
 
-type upload struct{}
+type upload struct {
+	outputHandler sync.OutputHandler
+}
 
 func (m *upload) Name() string {
 	return "files.Upload"
@@ -18,11 +21,18 @@ func (m *upload) Name() string {
 
 func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	cmdio.LogString(ctx, fmt.Sprintf("Uploading bundle files to %s...", b.Config.Workspace.FilePath))
-	sync, err := GetSync(ctx, bundle.ReadOnly(b))
+	opts, err := GetSyncOptions(ctx, bundle.ReadOnly(b))
 	if err != nil {
 		return diag.FromErr(err)
 	}
 
+	opts.OutputHandler = m.outputHandler
+	sync, err := sync.New(ctx, *opts)
+	if err != nil {
+		return diag.FromErr(err)
+	}
+	defer sync.Close()
+
 	b.Files, err = sync.RunOnce(ctx)
 	if err != nil {
 		return diag.FromErr(err)
@@ -32,6 +42,6 @@ func (m *upload) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	return nil
 }
 
-func Upload() bundle.Mutator {
-	return &upload{}
+func Upload(outputHandler sync.OutputHandler) bundle.Mutator {
+	return &upload{outputHandler}
 }
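To illustrate the new parameter: a caller builds a sync.OutputHandler closure and hands it to files.Upload, which forwards it into the sync options. A minimal sketch; the import path for the files package and the printing body are assumptions, not taken from this diff:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/cli/bundle"
	"github.com/databricks/cli/bundle/deploy/files" // assumed package path
	"github.com/databricks/cli/libs/sync"
)

func uploadWithLogging() bundle.Mutator {
	// The handler drains every sync event; here we simply print each one.
	handler := func(ctx context.Context, events <-chan sync.Event) {
		for e := range events {
			fmt.Printf("sync: %v\n", e)
		}
	}
	return files.Upload(handler)
}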
@@ -71,6 +71,7 @@ func TestConvertCluster(t *testing.T) {
 		"availability": "SPOT",
 	},
 	"data_security_mode": "USER_ISOLATION",
+	"no_wait": true,
 	"node_type_id": "m5.xlarge",
 	"autoscale": map[string]any{
 		"min_workers": int64(1),
@@ -18,6 +18,7 @@ import (
 	"github.com/databricks/cli/bundle/python"
 	"github.com/databricks/cli/bundle/scripts"
 	"github.com/databricks/cli/libs/cmdio"
+	"github.com/databricks/cli/libs/sync"
 	terraformlib "github.com/databricks/cli/libs/terraform"
 	tfjson "github.com/hashicorp/terraform-json"
 )
@@ -128,7 +129,7 @@ properties such as the 'catalog' or 'storage' are changed:`
 }
 
 // The deploy phase deploys artifacts and resources.
-func Deploy() bundle.Mutator {
+func Deploy(outputHandler sync.OutputHandler) bundle.Mutator {
 	// Core mutators that CRUD resources and modify deployment state. These
 	// mutators need informed consent if they are potentially destructive.
 	deployCore := bundle.Defer(
@@ -157,7 +158,7 @@ func Deploy() bundle.Mutator {
 		libraries.ExpandGlobReferences(),
 		libraries.Upload(),
 		python.TransformWheelTask(),
-		files.Upload(),
+		files.Upload(outputHandler),
 		deploy.StateUpdate(),
 		deploy.StatePush(),
 		permissions.ApplyWorkspaceRootPermissions(),
@@ -10,6 +10,7 @@ import (
 	"github.com/databricks/cli/cmd/bundle/utils"
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/diag"
+	"github.com/databricks/cli/libs/sync"
 	"github.com/spf13/cobra"
 )
@@ -25,6 +26,7 @@ func newDeployCommand() *cobra.Command {
 	var failOnActiveRuns bool
 	var clusterId string
 	var autoApprove bool
+	var verbose bool
 	cmd.Flags().BoolVar(&force, "force", false, "Force-override Git branch validation.")
 	cmd.Flags().BoolVar(&forceLock, "force-lock", false, "Force acquisition of deployment lock.")
 	cmd.Flags().BoolVar(&failOnActiveRuns, "fail-on-active-runs", false, "Fail if there are running jobs or pipelines in the deployment.")
@@ -32,6 +34,9 @@ func newDeployCommand() *cobra.Command {
 	cmd.Flags().StringVarP(&clusterId, "cluster-id", "c", "", "Override cluster in the deployment with the given cluster ID.")
 	cmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Skip interactive approvals that might be required for deployment.")
 	cmd.Flags().MarkDeprecated("compute-id", "use --cluster-id instead")
+	cmd.Flags().BoolVar(&verbose, "verbose", false, "Enable verbose output.")
+	// Verbose flag currently only affects file sync output, it's used by the vscode extension
+	cmd.Flags().MarkHidden("verbose")
 
 	cmd.RunE = func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
@@ -56,11 +61,18 @@ func newDeployCommand() *cobra.Command {
 			return nil
 		})
 
+		var outputHandler sync.OutputHandler
+		if verbose {
+			outputHandler = func(ctx context.Context, c <-chan sync.Event) {
+				sync.TextOutput(ctx, c, cmd.OutOrStdout())
+			}
+		}
+
 		diags = diags.Extend(
 			bundle.Apply(ctx, b, bundle.Seq(
 				phases.Initialize(),
 				phases.Build(),
-				phases.Deploy(),
+				phases.Deploy(outputHandler),
 			)),
 		)
 	}
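Net effect of this command change: databricks bundle deploy --verbose streams file-sync progress as text to stdout via sync.TextOutput. The flag is registered hidden because, per the comment above, it currently only affects file sync output and exists for the VS Code extension.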
@@ -55,13 +55,7 @@ task or a Python wheel task, the second example applies.
 			return diags.Error()
 		}
 
-		diags = bundle.Apply(ctx, b, bundle.Seq(
-			phases.Initialize(),
-			terraform.Interpolate(),
-			terraform.Write(),
-			terraform.StatePull(),
-			terraform.Load(terraform.ErrorOnEmptyState),
-		))
+		diags = bundle.Apply(ctx, b, phases.Initialize())
 		if err := diags.Error(); err != nil {
 			return err
 		}
@@ -84,6 +78,16 @@ task or a Python wheel task, the second example applies.
 			return fmt.Errorf("expected a KEY of the resource to run")
 		}
 
+		diags = bundle.Apply(ctx, b, bundle.Seq(
+			terraform.Interpolate(),
+			terraform.Write(),
+			terraform.StatePull(),
+			terraform.Load(terraform.ErrorOnEmptyState),
+		))
+		if err := diags.Error(); err != nil {
+			return err
+		}
+
 		runner, err := run.Find(b, args[0])
 		if err != nil {
 			return err
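The split above changes ordering only: phases.Initialize() still runs first, but the Terraform interpolate/write/state-pull sequence now runs after the KEY argument has been validated, so a missing KEY fails before any Terraform state is pulled.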
@@ -6,7 +6,6 @@ import (
 	"fmt"
 	"io"
 	"path/filepath"
-	stdsync "sync"
 	"time"
 
 	"github.com/databricks/cli/bundle"
@@ -46,6 +45,21 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn
 		return nil, flag.ErrHelp
 	}
 
+	var outputFunc func(context.Context, <-chan sync.Event, io.Writer)
+	switch f.output {
+	case flags.OutputText:
+		outputFunc = sync.TextOutput
+	case flags.OutputJSON:
+		outputFunc = sync.JsonOutput
+	}
+
+	var outputHandler sync.OutputHandler
+	if outputFunc != nil {
+		outputHandler = func(ctx context.Context, events <-chan sync.Event) {
+			outputFunc(ctx, events, cmd.OutOrStdout())
+		}
+	}
+
 	opts := sync.SyncOptions{
 		LocalRoot: vfs.MustNew(args[0]),
 		Paths:     []string{"."},
@@ -62,6 +76,8 @@ func (f *syncFlags) syncOptionsFromArgs(cmd *cobra.Command, args []string) (*syn
 		// exist and add it to the `.gitignore` file in the root.
 		SnapshotBasePath: filepath.Join(args[0], ".databricks"),
 		WorkspaceClient:  root.WorkspaceClient(cmd.Context()),
+
+		OutputHandler: outputHandler,
 	}
 	return &opts, nil
 }
@@ -118,23 +134,7 @@ func New() *cobra.Command {
 			if err != nil {
 				return err
 			}
-
-			var outputFunc func(context.Context, <-chan sync.Event, io.Writer)
-			switch f.output {
-			case flags.OutputText:
-				outputFunc = textOutput
-			case flags.OutputJSON:
-				outputFunc = jsonOutput
-			}
-
-			var wg stdsync.WaitGroup
-			if outputFunc != nil {
-				wg.Add(1)
-				go func() {
-					defer wg.Done()
-					outputFunc(ctx, s.Events(), cmd.OutOrStdout())
-				}()
-			}
+			defer s.Close()
 
 			if f.watch {
 				err = s.RunContinuous(ctx)
@@ -142,8 +142,6 @@ func New() *cobra.Command {
 				_, err = s.RunOnce(ctx)
 			}
 
-			s.Close()
-			wg.Wait()
 			return err
 		}
@@ -5,12 +5,10 @@ import (
 	"context"
 	"encoding/json"
 	"io"
-
-	"github.com/databricks/cli/libs/sync"
 )
 
 // Read synchronization events and write them as JSON to the specified writer (typically stdout).
-func jsonOutput(ctx context.Context, ch <-chan sync.Event, w io.Writer) {
+func JsonOutput(ctx context.Context, ch <-chan Event, w io.Writer) {
 	enc := json.NewEncoder(w)
 	for {
 		select {
@@ -31,7 +29,7 @@ func jsonOutput(ctx context.Context, ch <-chan sync.Event, w io.Writer) {
 }
 
 // Read synchronization events and write them as text to the specified writer (typically stdout).
-func textOutput(ctx context.Context, ch <-chan sync.Event, w io.Writer) {
+func TextOutput(ctx context.Context, ch <-chan Event, w io.Writer) {
 	bw := bufio.NewWriter(w)
 
 	for {
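Since jsonOutput/textOutput are now the exported sync.JsonOutput/sync.TextOutput, other packages can call them directly. A tiny sketch, assuming (as the receive loop above suggests) that the helpers return once the event channel is closed:

package main

import (
	"context"
	"os"

	"github.com/databricks/cli/libs/sync"
)

func main() {
	// No events are sent here; closing the channel lets JsonOutput return.
	ch := make(chan sync.Event)
	close(ch)
	sync.JsonOutput(context.Background(), ch, os.Stdout)
}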
@@ -3,6 +3,7 @@ package sync
 import (
 	"context"
 	"fmt"
+	stdsync "sync"
 	"time"
 
 	"github.com/databricks/cli/libs/filer"
@@ -15,6 +16,8 @@ import (
 	"github.com/databricks/databricks-sdk-go/service/iam"
 )
 
+type OutputHandler func(context.Context, <-chan Event)
+
 type SyncOptions struct {
 	LocalRoot vfs.Path
 	Paths     []string
@@ -34,6 +37,8 @@ type SyncOptions struct {
 	CurrentUser *iam.User
 
 	Host string
+
+	OutputHandler OutputHandler
 }
 
 type Sync struct {
@@ -49,6 +54,10 @@ type Sync struct {
 	// Synchronization progress events are sent to this event notifier.
 	notifier EventNotifier
 	seq      int
+
+	// WaitGroup is automatically created when an output handler is provided in the SyncOptions.
+	// Close call is required to ensure the output handler goroutine handles all events in time.
+	outputWaitGroup *stdsync.WaitGroup
 }
 
 // New initializes and returns a new [Sync] instance.
@@ -106,31 +115,41 @@ func New(ctx context.Context, opts SyncOptions) (*Sync, error) {
 		return nil, err
 	}
 
+	var notifier EventNotifier
+	var outputWaitGroup = &stdsync.WaitGroup{}
+	if opts.OutputHandler != nil {
+		ch := make(chan Event, MaxRequestsInFlight)
+		notifier = &ChannelNotifier{ch}
+		outputWaitGroup.Add(1)
+		go func() {
+			defer outputWaitGroup.Done()
+			opts.OutputHandler(ctx, ch)
+		}()
+	} else {
+		notifier = &NopNotifier{}
+	}
+
 	return &Sync{
 		SyncOptions: &opts,
 
-		fileSet:        fileSet,
-		includeFileSet: includeFileSet,
-		excludeFileSet: excludeFileSet,
-		snapshot:       snapshot,
-		filer:          filer,
-		notifier:       &NopNotifier{},
-		seq:            0,
+		fileSet:         fileSet,
+		includeFileSet:  includeFileSet,
+		excludeFileSet:  excludeFileSet,
+		snapshot:        snapshot,
+		filer:           filer,
+		notifier:        notifier,
+		outputWaitGroup: outputWaitGroup,
+		seq:             0,
 	}, nil
 }
 
-func (s *Sync) Events() <-chan Event {
-	ch := make(chan Event, MaxRequestsInFlight)
-	s.notifier = &ChannelNotifier{ch}
-	return ch
-}
-
 func (s *Sync) Close() {
 	if s.notifier == nil {
 		return
 	}
 	s.notifier.Close()
 	s.notifier = nil
+	s.outputWaitGroup.Wait()
 }
 
 func (s *Sync) notifyStart(ctx context.Context, d diff) {
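Putting the new lifecycle together: sync.New starts the handler goroutine when OutputHandler is set, and Close both closes the notifier and waits on outputWaitGroup so no events are dropped. A minimal sketch under those assumptions; runSyncOnce is a hypothetical helper, and all SyncOptions fields other than OutputHandler are elided:

package main

import (
	"context"
	"os"

	"github.com/databricks/cli/libs/sync"
)

func runSyncOnce(ctx context.Context, opts sync.SyncOptions) error {
	// TextOutput is the exported helper from the diffs above.
	opts.OutputHandler = func(ctx context.Context, ch <-chan sync.Event) {
		sync.TextOutput(ctx, ch, os.Stdout)
	}

	s, err := sync.New(ctx, opts)
	if err != nil {
		return err
	}
	// Close closes the event channel and waits for the handler goroutine.
	defer s.Close()

	_, err = s.RunOnce(ctx)
	return err
}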
@@ -3,6 +3,12 @@ resources:
   pipelines:
     {{.project_name}}_pipeline:
       name: {{.project_name}}_pipeline
+      {{- if eq default_catalog ""}}
+      ## Specify the 'catalog' field to configure this pipeline to make use of Unity Catalog:
+      # catalog: catalog_name
+      {{- else}}
+      catalog: {{default_catalog}}
+      {{- end}}
       target: {{.project_name}}_${bundle.environment}
       libraries:
         - notebook:
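In generated projects this means: when the default_catalog template helper resolves to a non-empty value, the pipeline is created with catalog: set to it; otherwise the field is emitted only as a commented-out Unity Catalog hint.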