Merge remote-tracking branch 'origin' into fix-windows-root-filer

Shreyas Goenka 2023-06-19 00:52:31 +02:00
commit 5368972172
36 changed files with 667 additions and 333 deletions

View File

@@ -133,14 +133,18 @@ var {{.CamelName}}Cmd = &cobra.Command{
 		}
 	{{- end -}}
 		{{$method := .}}
+		{{- if .Request.IsAllRequiredFieldsPrimitive -}}
 		{{- range $arg, $field := .Request.RequiredFields}}
 			{{if not $field.Entity.IsString -}}
 			_, err = fmt.Sscan(args[{{$arg}}], &{{$method.CamelName}}Req.{{$field.PascalName}})
 			if err != nil {
 				return fmt.Errorf("invalid {{$field.ConstantName}}: %s", args[{{$arg}}])
 			}{{else -}}
 			{{$method.CamelName}}Req.{{$field.PascalName}} = args[{{$arg}}]
 			{{- end -}}{{end}}
+		{{- else -}}
+			return fmt.Errorf("please provide command input in JSON format by specifying the --json flag")
+		{{- end -}}
 	}
 {{end}}
 	{{if $wait -}}
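
When a request type's required fields are all primitive, the generated body keeps the fmt.Sscan parsing shown above; any other shape now falls through to the JSON-only error. Below is a minimal, self-contained sketch of what the primitive branch renders to; the resetReq type and JOB_ID field are hypothetical stand-ins, not output from the actual generator.

package main

import "fmt"

// resetReq stands in for a hypothetical generated request struct
// whose only required field is a primitive.
type resetReq struct {
	JobId int64
}

// run mirrors the body the template emits for a non-string
// primitive positional argument.
func run(args []string) error {
	var req resetReq
	_, err := fmt.Sscan(args[0], &req.JobId)
	if err != nil {
		return fmt.Errorf("invalid JOB_ID: %s", args[0])
	}
	return nil
}

func main() {
	fmt.Println(run([]string{"123"})) // <nil>
	fmt.Println(run([]string{"abc"})) // invalid JOB_ID: abc
}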

View File

@@ -2,7 +2,7 @@
 [![build](https://github.com/databricks/cli/workflows/build/badge.svg?branch=main)](https://github.com/databricks/cli/actions?query=workflow%3Abuild+branch%3Amain)

-This project is in private preview.
+This project is in public preview.

 Documentation about the full REST API coverage is available in the [docs folder](docs/commands.md).

View File

@@ -2,10 +2,12 @@ package deployer

 import (
 	"context"
+	"errors"
 	"fmt"
+	"io"
+	"io/fs"
 	"os"
 	"path/filepath"
-	"strings"

 	"github.com/databricks/cli/libs/locker"
 	"github.com/databricks/cli/libs/log"
@@ -97,22 +99,24 @@ func (b *Deployer) tfStateLocalPath() string {
 	return filepath.Join(b.DefaultTerraformRoot(), "terraform.tfstate")
 }

-func (b *Deployer) LoadTerraformState(ctx context.Context) error {
-	bytes, err := b.locker.GetRawJsonFileContent(ctx, b.tfStateRemotePath())
-	if err != nil {
+func (d *Deployer) LoadTerraformState(ctx context.Context) error {
+	r, err := d.locker.Read(ctx, d.tfStateRemotePath())
+	if errors.Is(err, fs.ErrNotExist) {
 		// If remote tf state is absent, use local tf state
-		if strings.Contains(err.Error(), "File not found.") {
-			return nil
-		} else {
-			return err
-		}
+		return nil
 	}
-	err = os.MkdirAll(b.DefaultTerraformRoot(), os.ModeDir)
 	if err != nil {
 		return err
 	}
-	err = os.WriteFile(b.tfStateLocalPath(), bytes, os.ModePerm)
-	return err
+	err = os.MkdirAll(d.DefaultTerraformRoot(), os.ModeDir)
+	if err != nil {
+		return err
+	}
+	b, err := io.ReadAll(r)
+	if err != nil {
+		return err
+	}
+	return os.WriteFile(d.tfStateLocalPath(), b, os.ModePerm)
 }

@@ -120,7 +124,7 @@ func (b *Deployer) SaveTerraformState(ctx context.Context) error {
 	if err != nil {
 		return err
 	}
-	return b.locker.PutFile(ctx, b.tfStateRemotePath(), bytes)
+	return b.locker.Write(ctx, b.tfStateRemotePath(), bytes)
 }

 func (d *Deployer) Lock(ctx context.Context, isForced bool) error {
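
The substantive change here is how the missing-state case is detected: instead of matching the backend's "File not found." message as a substring, the caller now tests for the wrapped fs.ErrNotExist sentinel, which survives error wrapping and message rewording. A self-contained sketch of the pattern (the wrapped error below is illustrative, not the filer's actual message):

package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	// A filer-style error that wraps the standard sentinel.
	err := fmt.Errorf("terraform.tfstate: %w", fs.ErrNotExist)

	// Brittle: strings.Contains(err.Error(), "File not found.")
	// breaks as soon as the backend rewords its message.

	// Robust: errors.Is unwraps until it finds the sentinel.
	if errors.Is(err, fs.ErrNotExist) {
		fmt.Println("remote tf state absent; using local tf state")
	}
}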

View File

@@ -55,10 +55,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &createReq.Budget)
-		if err != nil {
-			return fmt.Errorf("invalid BUDGET: %s", args[0])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.Budgets.Create(ctx, createReq)
@@ -257,11 +254,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &updateReq.Budget)
-		if err != nil {
-			return fmt.Errorf("invalid BUDGET: %s", args[0])
-		}
-		updateReq.BudgetId = args[1]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = a.Budgets.Update(ctx, updateReq)

View File

@@ -68,11 +68,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.CredentialsName = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.AwsCredentials)
-		if err != nil {
-			return fmt.Errorf("invalid AWS_CREDENTIALS: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.Credentials.Create(ctx, createReq)

View File

@@ -63,11 +63,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.RedirectUrls)
-		if err != nil {
-			return fmt.Errorf("invalid REDIRECT_URLS: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.CustomAppIntegration.Create(ctx, createReq)

View File

@@ -85,10 +85,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &createReq.UseCases)
-		if err != nil {
-			return fmt.Errorf("invalid USE_CASES: %s", args[0])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.EncryptionKeys.Create(ctx, createReq)

View File

@@ -85,15 +85,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &createReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.IpAccessLists.Create(ctx, createReq)
@@ -300,20 +292,7 @@ var replaceCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		replaceReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &replaceReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &replaceReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
-		_, err = fmt.Sscan(args[3], &replaceReq.Enabled)
-		if err != nil {
-			return fmt.Errorf("invalid ENABLED: %s", args[3])
-		}
-		replaceReq.IpAccessListId = args[4]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = a.IpAccessLists.Replace(ctx, replaceReq)
@@ -372,20 +351,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		updateReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &updateReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &updateReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
-		_, err = fmt.Sscan(args[3], &updateReq.Enabled)
-		if err != nil {
-			return fmt.Errorf("invalid ENABLED: %s", args[3])
-		}
-		updateReq.IpAccessListId = args[4]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = a.IpAccessLists.Update(ctx, updateReq)

View File

@@ -65,11 +65,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.StorageConfigurationName = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.RootBucketInfo)
-		if err != nil {
-			return fmt.Errorf("invalid ROOT_BUCKET_INFO: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := a.Storage.Create(ctx, createReq)

View File

@@ -221,18 +221,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &updateReq.Permissions)
-		if err != nil {
-			return fmt.Errorf("invalid PERMISSIONS: %s", args[0])
-		}
-		_, err = fmt.Sscan(args[1], &updateReq.WorkspaceId)
-		if err != nil {
-			return fmt.Errorf("invalid WORKSPACE_ID: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &updateReq.PrincipalId)
-		if err != nil {
-			return fmt.Errorf("invalid PRINCIPAL_ID: %s", args[2])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = a.WorkspaceAssignment.Update(ctx, updateReq)

View File

@@ -3,7 +3,6 @@ package fs
 import (
 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdio"
-	"github.com/databricks/cli/libs/filer"
 	"github.com/spf13/cobra"
 )

@@ -16,14 +15,8 @@ var catCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)

-		path, err := resolveDbfsPath(args[0])
-		if err != nil {
-			return err
-		}
-
-		f, err := filer.NewDbfsClient(w, "/")
+		f, path, err := filerForPath(ctx, args[0])
 		if err != nil {
 			return err
 		}

cmd/fs/cp.go (new file, 179 lines)
View File

@@ -0,0 +1,179 @@
package fs

import (
	"context"
	"errors"
	"fmt"
	"io/fs"
	"path"
	"path/filepath"

	"github.com/databricks/cli/cmd/root"
	"github.com/databricks/cli/libs/cmdio"
	"github.com/databricks/cli/libs/filer"
	"github.com/spf13/cobra"
)

type copy struct {
	ctx         context.Context
	sourceFiler filer.Filer
	targetFiler filer.Filer
}

func (c *copy) cpWriteCallback(sourceDir, targetDir string) fs.WalkDirFunc {
	return func(sourcePath string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}

		// Compute path relative to the target directory
		relPath, err := filepath.Rel(sourceDir, sourcePath)
		if err != nil {
			return err
		}
		relPath = filepath.ToSlash(relPath)

		// Compute target path for the file
		targetPath := path.Join(targetDir, relPath)

		// create directory and return early
		if d.IsDir() {
			return c.targetFiler.Mkdir(c.ctx, targetPath)
		}

		return c.cpFileToFile(sourcePath, targetPath)
	}
}

func (c *copy) cpDirToDir(sourceDir, targetDir string) error {
	if !cpRecursive {
		return fmt.Errorf("source path %s is a directory. Please specify the --recursive flag", sourceDir)
	}

	sourceFs := filer.NewFS(c.ctx, c.sourceFiler)
	return fs.WalkDir(sourceFs, sourceDir, c.cpWriteCallback(sourceDir, targetDir))
}

func (c *copy) cpFileToDir(sourcePath, targetDir string) error {
	fileName := path.Base(sourcePath)
	targetPath := path.Join(targetDir, fileName)

	return c.cpFileToFile(sourcePath, targetPath)
}

func (c *copy) cpFileToFile(sourcePath, targetPath string) error {
	// Get reader for file at source path
	r, err := c.sourceFiler.Read(c.ctx, sourcePath)
	if err != nil {
		return err
	}
	defer r.Close()

	if cpOverwrite {
		err = c.targetFiler.Write(c.ctx, targetPath, r, filer.OverwriteIfExists)
		if err != nil {
			return err
		}
	} else {
		err = c.targetFiler.Write(c.ctx, targetPath, r)
		// skip if file already exists
		if err != nil && errors.Is(err, fs.ErrExist) {
			return emitCpFileSkippedEvent(c.ctx, sourcePath, targetPath)
		}
		if err != nil {
			return err
		}
	}
	return emitCpFileCopiedEvent(c.ctx, sourcePath, targetPath)
}

// TODO: emit these events on stderr
// TODO: add integration tests for these events
func emitCpFileSkippedEvent(ctx context.Context, sourcePath, targetPath string) error {
	event := newFileSkippedEvent(sourcePath, targetPath)
	template := "{{.SourcePath}} -> {{.TargetPath}} (skipped; already exists)\n"

	return cmdio.RenderWithTemplate(ctx, event, template)
}

func emitCpFileCopiedEvent(ctx context.Context, sourcePath, targetPath string) error {
	event := newFileCopiedEvent(sourcePath, targetPath)
	template := "{{.SourcePath}} -> {{.TargetPath}}\n"

	return cmdio.RenderWithTemplate(ctx, event, template)
}

var cpOverwrite bool
var cpRecursive bool

// cpCmd represents the fs cp command
var cpCmd = &cobra.Command{
	Use:   "cp SOURCE_PATH TARGET_PATH",
	Short: "Copy files and directories to and from DBFS.",
	Long: `Copy files to and from DBFS.

  It is required that you specify the scheme "file" for local files and
  "dbfs" for dbfs files. For example: file:/foo/bar, file:/c:/foo/bar or dbfs:/foo/bar.

  Recursively copying a directory will copy all files inside directory
  at SOURCE_PATH to the directory at TARGET_PATH.

  When copying a file, if TARGET_PATH is a directory, the file will be created
  inside the directory, otherwise the file is created at TARGET_PATH.
`,
	Args:    cobra.ExactArgs(2),
	PreRunE: root.MustWorkspaceClient,
	RunE: func(cmd *cobra.Command, args []string) error {
		ctx := cmd.Context()

		// TODO: Error if a user uses '\' as path separator on windows when "file"
		// scheme is specified (https://github.com/databricks/cli/issues/485)

		// Get source filer and source path without scheme
		fullSourcePath := args[0]
		sourceFiler, sourcePath, err := filerForPath(ctx, fullSourcePath)
		if err != nil {
			return err
		}

		// Get target filer and target path without scheme
		fullTargetPath := args[1]
		targetFiler, targetPath, err := filerForPath(ctx, fullTargetPath)
		if err != nil {
			return err
		}

		c := copy{
			ctx:         ctx,
			sourceFiler: sourceFiler,
			targetFiler: targetFiler,
		}

		// Get information about file at source path
		sourceInfo, err := sourceFiler.Stat(ctx, sourcePath)
		if err != nil {
			return err
		}

		// case 1: source path is a directory, then recursively create files at target path
		if sourceInfo.IsDir() {
			return c.cpDirToDir(sourcePath, targetPath)
		}

		// case 2: source path is a file, and target path is a directory. In this case
		// we copy the file to inside the directory
		if targetInfo, err := targetFiler.Stat(ctx, targetPath); err == nil && targetInfo.IsDir() {
			return c.cpFileToDir(sourcePath, targetPath)
		}

		// case 3: source path is a file, and target path is a file
		return c.cpFileToFile(sourcePath, targetPath)
	},
}

func init() {
	cpCmd.Flags().BoolVar(&cpOverwrite, "overwrite", false, "overwrite existing files")
	cpCmd.Flags().BoolVarP(&cpRecursive, "recursive", "r", false, "recursively copy files from directory")
	fsCmd.AddCommand(cpCmd)
}
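
cpDirToDir leans on filer.NewFS adapting a filer.Filer to the standard fs.FS interface, so the recursive copy is just fs.WalkDir plus per-entry dispatch. A self-contained sketch of that walk-and-dispatch shape using only the standard library, with os.DirFS standing in for filer.NewFS and prints standing in for the Mkdir/copy calls:

package main

import (
	"fmt"
	"io/fs"
	"os"
	"path"
)

func main() {
	// os.DirFS plays the role filer.NewFS plays in cpDirToDir.
	sourceFs := os.DirFS(".")
	targetDir := "/target" // hypothetical destination root

	err := fs.WalkDir(sourceFs, ".", func(p string, d fs.DirEntry, err error) error {
		if err != nil {
			return err
		}
		// cpWriteCallback computes the path relative to the source
		// root; with the walk rooted at ".", p already is relative.
		targetPath := path.Join(targetDir, p)
		if d.IsDir() {
			fmt.Println("mkdir", targetPath) // targetFiler.Mkdir(...)
			return nil
		}
		fmt.Println("copy", p, "->", targetPath) // cpFileToFile(...)
		return nil
	})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}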

cmd/fs/events.go (new file, 30 lines)
View File

@@ -0,0 +1,30 @@
package fs

type fileIOEvent struct {
	SourcePath string    `json:"source_path,omitempty"`
	TargetPath string    `json:"target_path,omitempty"`
	Type       EventType `json:"type"`
}

type EventType string

const (
	EventTypeFileCopied  = EventType("FILE_COPIED")
	EventTypeFileSkipped = EventType("FILE_SKIPPED")
)

func newFileCopiedEvent(sourcePath, targetPath string) fileIOEvent {
	return fileIOEvent{
		SourcePath: sourcePath,
		TargetPath: targetPath,
		Type:       EventTypeFileCopied,
	}
}

func newFileSkippedEvent(sourcePath, targetPath string) fileIOEvent {
	return fileIOEvent{
		SourcePath: sourcePath,
		TargetPath: targetPath,
		Type:       EventTypeFileSkipped,
	}
}
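
These events render through cmdio templates as plain text, but the JSON tags mean the same values can serialize for machine-readable output. A sketch of what the JSON form of a copied-file event would look like; the struct here mirrors the one above (with Type flattened to a plain string) rather than importing the cmd/fs package:

package main

import (
	"encoding/json"
	"fmt"
)

type fileIOEvent struct {
	SourcePath string `json:"source_path,omitempty"`
	TargetPath string `json:"target_path,omitempty"`
	Type       string `json:"type"`
}

func main() {
	b, err := json.Marshal(fileIOEvent{
		SourcePath: "file:/tmp/foo.txt",
		TargetPath: "dbfs:/tmp/foo.txt",
		Type:       "FILE_COPIED",
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b))
	// {"source_path":"file:/tmp/foo.txt","target_path":"dbfs:/tmp/foo.txt","type":"FILE_COPIED"}
}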

View File

@@ -1,14 +1,51 @@
 package fs

 import (
+	"context"
 	"fmt"
+	"runtime"
 	"strings"
+
+	"github.com/databricks/cli/cmd/root"
+	"github.com/databricks/cli/libs/filer"
 )

-func resolveDbfsPath(path string) (string, error) {
-	if !strings.HasPrefix(path, "dbfs:/") {
-		return "", fmt.Errorf("expected dbfs path (with the dbfs:/ prefix): %s", path)
-	}
-	return strings.TrimPrefix(path, "dbfs:"), nil
+type Scheme string
+
+const (
+	DbfsScheme  = Scheme("dbfs")
+	LocalScheme = Scheme("file")
+	NoScheme    = Scheme("")
+)
+
+func filerForPath(ctx context.Context, fullPath string) (filer.Filer, string, error) {
+	parts := strings.SplitN(fullPath, ":/", 2)
+	if len(parts) < 2 {
+		return nil, "", fmt.Errorf(`no scheme specified for path %s. Please specify scheme "dbfs" or "file". Example: file:/foo/bar or file:/c:/foo/bar`, fullPath)
+	}
+	scheme := Scheme(parts[0])
+	path := parts[1]
+	switch scheme {
+	case DbfsScheme:
+		w := root.WorkspaceClient(ctx)
+		f, err := filer.NewDbfsClient(w, "/")
+		return f, path, err
+
+	case LocalScheme:
+		if runtime.GOOS == "windows" {
+			parts := strings.SplitN(path, ":", 2)
+			if len(parts) < 2 {
+				return nil, "", fmt.Errorf("no volume specified for path: %s", path)
+			}
+			volume := parts[0] + ":"
+			relPath := parts[1]
+			f, err := filer.NewLocalClient(volume)
+			return f, relPath, err
+		}
+		f, err := filer.NewLocalClient("/")
+		return f, path, err
+
+	default:
+		return nil, "", fmt.Errorf(`unsupported scheme %s specified for path %s. Please specify scheme "dbfs" or "file". Example: file:/foo/bar or file:/c:/foo/bar`, scheme, fullPath)
+	}
 }
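
The parsing rule is a plain SplitN: everything before the first ":/" is the scheme, everything after is the path within that scheme, and on Windows the local branch splits once more to peel off the volume. A standalone sketch of just the scheme split, stripped of the client construction:

package main

import (
	"fmt"
	"strings"
)

// splitScheme mirrors filerForPath's first step: scheme before the
// first ":/", path after it.
func splitScheme(fullPath string) (scheme, path string, err error) {
	parts := strings.SplitN(fullPath, ":/", 2)
	if len(parts) < 2 {
		return "", "", fmt.Errorf("no scheme specified for path %s", fullPath)
	}
	return parts[0], parts[1], nil
}

func main() {
	for _, p := range []string{"dbfs:/foo/bar", "file:/c:/foo/bar", "/no/scheme"} {
		scheme, path, err := splitScheme(p)
		fmt.Printf("scheme=%q path=%q err=%v\n", scheme, path, err)
	}
	// scheme="dbfs" path="foo/bar" err=<nil>
	// scheme="file" path="c:/foo/bar" err=<nil>
	// scheme="" path="" err=no scheme specified for path /no/scheme
}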

View File

@@ -1,38 +1,26 @@
 package fs

 import (
+	"context"
+	"runtime"
 	"testing"

 	"github.com/stretchr/testify/assert"
 )

-func TestResolveDbfsPath(t *testing.T) {
-	path, err := resolveDbfsPath("dbfs:/")
-	assert.NoError(t, err)
-	assert.Equal(t, "/", path)
-
-	path, err = resolveDbfsPath("dbfs:/abc")
-	assert.NoError(t, err)
-	assert.Equal(t, "/abc", path)
-
-	path, err = resolveDbfsPath("dbfs:/a/b/c")
-	assert.NoError(t, err)
-	assert.Equal(t, "/a/b/c", path)
-
-	path, err = resolveDbfsPath("dbfs:/a/b/.")
-	assert.NoError(t, err)
-	assert.Equal(t, "/a/b/.", path)
-
-	path, err = resolveDbfsPath("dbfs:/a/../c")
-	assert.NoError(t, err)
-	assert.Equal(t, "/a/../c", path)
-
-	_, err = resolveDbfsPath("dbf:/a/b/c")
-	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): dbf:/a/b/c")
-
-	_, err = resolveDbfsPath("/a/b/c")
-	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): /a/b/c")
-
-	_, err = resolveDbfsPath("dbfs:a/b/c")
-	assert.ErrorContains(t, err, "expected dbfs path (with the dbfs:/ prefix): dbfs:a/b/c")
+func TestNotSpecifyingVolumeForWindowsPathErrors(t *testing.T) {
+	if runtime.GOOS != "windows" {
+		t.Skip()
+	}
+
+	ctx := context.Background()
+	pathWithVolume := `file:/c:/foo/bar`
+	pathWOVolume := `file:/uno/dos`
+
+	_, path, err := filerForPath(ctx, pathWithVolume)
+	assert.NoError(t, err)
+	assert.Equal(t, `/foo/bar`, path)
+
+	_, _, err = filerForPath(ctx, pathWOVolume)
+	assert.Equal(t, "no volume specified for path: uno/dos", err.Error())
 }

View File

@@ -7,7 +7,6 @@ import (

 	"github.com/databricks/cli/cmd/root"
 	"github.com/databricks/cli/libs/cmdio"
-	"github.com/databricks/cli/libs/filer"
 	"github.com/spf13/cobra"
 )

@@ -42,14 +41,8 @@ var lsCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)

-		path, err := resolveDbfsPath(args[0])
-		if err != nil {
-			return err
-		}
-
-		f, err := filer.NewDbfsClient(w, "/")
+		f, path, err := filerForPath(ctx, args[0])
 		if err != nil {
 			return err
 		}

View File

@@ -2,7 +2,6 @@ package fs

 import (
 	"github.com/databricks/cli/cmd/root"
-	"github.com/databricks/cli/libs/filer"
 	"github.com/spf13/cobra"
 )

@@ -18,14 +17,8 @@ var mkdirCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)

-		path, err := resolveDbfsPath(args[0])
-		if err != nil {
-			return err
-		}
-
-		f, err := filer.NewDbfsClient(w, "/")
+		f, path, err := filerForPath(ctx, args[0])
 		if err != nil {
 			return err
 		}

View File

@@ -2,7 +2,7 @@ package fs

 import (
 	"github.com/databricks/cli/cmd/root"
-	"github.com/databricks/databricks-sdk-go/service/files"
+	"github.com/databricks/cli/libs/filer"
 	"github.com/spf13/cobra"
 )

@@ -15,17 +15,16 @@ var rmCmd = &cobra.Command{
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := cmd.Context()
-		w := root.WorkspaceClient(ctx)

-		path, err := resolveDbfsPath(args[0])
+		f, path, err := filerForPath(ctx, args[0])
 		if err != nil {
 			return err
 		}

-		return w.Dbfs.Delete(ctx, files.Delete{
-			Path:      path,
-			Recursive: recursive,
-		})
+		if recursive {
+			return f.Delete(ctx, path, filer.DeleteRecursively)
+		}
+		return f.Delete(ctx, path)
 	},
 }
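
The rewritten rm goes through the filer interface, where behavior switches like recursion are variadic mode arguments (filer.DeleteRecursively above) rather than request-struct fields. A small sketch of that variadic option pattern; the types here are illustrative, not the filer package's actual definitions:

package main

import "fmt"

type DeleteMode int

const DeleteRecursively DeleteMode = 1

// Delete accepts zero or more modes, in the style of
// f.Delete(ctx, path, filer.DeleteRecursively).
func Delete(path string, mode ...DeleteMode) error {
	recursive := false
	for _, m := range mode {
		if m == DeleteRecursively {
			recursive = true
		}
	}
	fmt.Println("delete", path, "recursive:", recursive)
	return nil
}

func main() {
	Delete("/tmp/a")                    // delete /tmp/a recursive: false
	Delete("/tmp/a", DeleteRecursively) // delete /tmp/a recursive: true
}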

View File

@@ -60,7 +60,7 @@ func (l *friendlyHandler) Handle(ctx context.Context, rec slog.Record) error {
 	msg := fmt.Sprintf("%s %s %s%s\n",
 		color.MagentaString(t),
 		l.coloredLevel(rec),
-		color.HiWhiteString(rec.Message),
+		rec.Message,
 		attrs)
 	_, err := l.w.Write([]byte(msg))
 	return err

View File

@@ -60,12 +60,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.Options)
-		if err != nil {
-			return fmt.Errorf("invalid OPTIONS: %s", args[1])
-		}
-		createReq.QueryId = args[2]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.Alerts.Create(ctx, createReq)
@@ -265,13 +260,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		updateReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &updateReq.Options)
-		if err != nil {
-			return fmt.Errorf("invalid OPTIONS: %s", args[1])
-		}
-		updateReq.QueryId = args[2]
-		updateReq.AlertId = args[3]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.Alerts.Update(ctx, updateReq)

View File

@@ -73,15 +73,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.ConnectionType)
-		if err != nil {
-			return fmt.Errorf("invalid CONNECTION_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &createReq.OptionsKvpairs)
-		if err != nil {
-			return fmt.Errorf("invalid OPTIONS_KVPAIRS: %s", args[2])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.Connections.Create(ctx, createReq)
@@ -277,12 +269,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		updateReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &updateReq.OptionsKvpairs)
-		if err != nil {
-			return fmt.Errorf("invalid OPTIONS_KVPAIRS: %s", args[1])
-		}
-		updateReq.NameArg = args[2]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.Connections.Update(ctx, updateReq)

View File

@@ -66,52 +66,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Name = args[0]
-		createReq.CatalogName = args[1]
-		createReq.SchemaName = args[2]
-		_, err = fmt.Sscan(args[3], &createReq.InputParams)
-		if err != nil {
-			return fmt.Errorf("invalid INPUT_PARAMS: %s", args[3])
-		}
-		_, err = fmt.Sscan(args[4], &createReq.DataType)
-		if err != nil {
-			return fmt.Errorf("invalid DATA_TYPE: %s", args[4])
-		}
-		createReq.FullDataType = args[5]
-		_, err = fmt.Sscan(args[6], &createReq.ReturnParams)
-		if err != nil {
-			return fmt.Errorf("invalid RETURN_PARAMS: %s", args[6])
-		}
-		_, err = fmt.Sscan(args[7], &createReq.RoutineBody)
-		if err != nil {
-			return fmt.Errorf("invalid ROUTINE_BODY: %s", args[7])
-		}
-		createReq.RoutineDefinition = args[8]
-		_, err = fmt.Sscan(args[9], &createReq.RoutineDependencies)
-		if err != nil {
-			return fmt.Errorf("invalid ROUTINE_DEPENDENCIES: %s", args[9])
-		}
-		_, err = fmt.Sscan(args[10], &createReq.ParameterStyle)
-		if err != nil {
-			return fmt.Errorf("invalid PARAMETER_STYLE: %s", args[10])
-		}
-		_, err = fmt.Sscan(args[11], &createReq.IsDeterministic)
-		if err != nil {
-			return fmt.Errorf("invalid IS_DETERMINISTIC: %s", args[11])
-		}
-		_, err = fmt.Sscan(args[12], &createReq.SqlDataAccess)
-		if err != nil {
-			return fmt.Errorf("invalid SQL_DATA_ACCESS: %s", args[12])
-		}
-		_, err = fmt.Sscan(args[13], &createReq.IsNullCall)
-		if err != nil {
-			return fmt.Errorf("invalid IS_NULL_CALL: %s", args[13])
-		}
-		_, err = fmt.Sscan(args[14], &createReq.SecurityType)
-		if err != nil {
-			return fmt.Errorf("invalid SECURITY_TYPE: %s", args[14])
-		}
-		createReq.SpecificName = args[15]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.Functions.Create(ctx, createReq)

View File

@@ -53,6 +53,7 @@ func init() {
 	// TODO: map via StringToStringVar: custom_tags
 	// TODO: complex arg: disk_spec
 	createCmd.Flags().BoolVar(&createReq.EnableElasticDisk, "enable-elastic-disk", createReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, the instances in this pool will dynamically acquire additional disk space when their Spark workers are running low on disk space.`)
+	// TODO: complex arg: gcp_attributes
 	createCmd.Flags().IntVar(&createReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", createReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
 	// TODO: complex arg: instance_pool_fleet_attributes
 	createCmd.Flags().IntVar(&createReq.MaxCapacity, "max-capacity", createReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)
@@ -179,6 +180,7 @@ func init() {
 	// TODO: map via StringToStringVar: custom_tags
 	// TODO: complex arg: disk_spec
 	editCmd.Flags().BoolVar(&editReq.EnableElasticDisk, "enable-elastic-disk", editReq.EnableElasticDisk, `Autoscaling Local Storage: when enabled, the instances in this pool will dynamically acquire additional disk space when their Spark workers are running low on disk space.`)
+	// TODO: complex arg: gcp_attributes
 	editCmd.Flags().IntVar(&editReq.IdleInstanceAutoterminationMinutes, "idle-instance-autotermination-minutes", editReq.IdleInstanceAutoterminationMinutes, `Automatically terminates the extra instances in the pool cache after they are inactive for this time in minutes if min_idle_instances requirement is already met.`)
 	// TODO: complex arg: instance_pool_fleet_attributes
 	editCmd.Flags().IntVar(&editReq.MaxCapacity, "max-capacity", editReq.MaxCapacity, `Maximum number of outstanding instances to keep in the pool, including both instances used by clusters and idle instances.`)

View File

@@ -86,15 +86,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &createReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.IpAccessLists.Create(ctx, createReq)
@@ -303,20 +295,7 @@ var replaceCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		replaceReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &replaceReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &replaceReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
-		_, err = fmt.Sscan(args[3], &replaceReq.Enabled)
-		if err != nil {
-			return fmt.Errorf("invalid ENABLED: %s", args[3])
-		}
-		replaceReq.IpAccessListId = args[4]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.IpAccessLists.Replace(ctx, replaceReq)
@@ -377,20 +356,7 @@ var updateCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		updateReq.Label = args[0]
-		_, err = fmt.Sscan(args[1], &updateReq.ListType)
-		if err != nil {
-			return fmt.Errorf("invalid LIST_TYPE: %s", args[1])
-		}
-		_, err = fmt.Sscan(args[2], &updateReq.IpAddresses)
-		if err != nil {
-			return fmt.Errorf("invalid IP_ADDRESSES: %s", args[2])
-		}
-		_, err = fmt.Sscan(args[3], &updateReq.Enabled)
-		if err != nil {
-			return fmt.Errorf("invalid ENABLED: %s", args[3])
-		}
-		updateReq.IpAccessListId = args[4]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.IpAccessLists.Update(ctx, updateReq)

View File

@@ -912,14 +912,7 @@ var resetCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &resetReq.JobId)
-		if err != nil {
-			return fmt.Errorf("invalid JOB_ID: %s", args[0])
-		}
-		_, err = fmt.Sscan(args[1], &resetReq.NewSettings)
-		if err != nil {
-			return fmt.Errorf("invalid NEW_SETTINGS: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.Jobs.Reset(ctx, resetReq)

View File

@@ -172,11 +172,7 @@ var installCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		installReq.ClusterId = args[0]
-		_, err = fmt.Sscan(args[1], &installReq.Libraries)
-		if err != nil {
-			return fmt.Errorf("invalid LIBRARIES: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.Libraries.Install(ctx, installReq)
@@ -222,11 +218,7 @@ var uninstallCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		uninstallReq.ClusterId = args[0]
-		_, err = fmt.Sscan(args[1], &uninstallReq.Libraries)
-		if err != nil {
-			return fmt.Errorf("invalid LIBRARIES: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	err = w.Libraries.Uninstall(ctx, uninstallReq)

View File

@@ -349,10 +349,7 @@ var createWebhookCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &createWebhookReq.Events)
-		if err != nil {
-			return fmt.Errorf("invalid EVENTS: %s", args[0])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.ModelRegistry.CreateWebhook(ctx, createWebhookReq)

View File

@@ -120,11 +120,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.Name = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.Config)
-		if err != nil {
-			return fmt.Errorf("invalid CONFIG: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	wait, err := w.ServingEndpoints.Create(ctx, createReq)
@@ -474,11 +470,7 @@ var updateConfigCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		_, err = fmt.Sscan(args[0], &updateConfigReq.ServedModels)
-		if err != nil {
-			return fmt.Errorf("invalid SERVED_MODELS: %s", args[0])
-		}
-		updateConfigReq.Name = args[1]
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	wait, err := w.ServingEndpoints.UpdateConfig(ctx, updateConfigReq)

View File

@@ -72,11 +72,7 @@ var createCmd = &cobra.Command{
 			return err
 		}
 	} else {
-		createReq.FullNameArg = args[0]
-		_, err = fmt.Sscan(args[1], &createReq.Constraint)
-		if err != nil {
-			return fmt.Errorf("invalid CONSTRAINT: %s", args[1])
-		}
+		return fmt.Errorf("provide command input in JSON format by specifying --json option")
 	}

 	response, err := w.TableConstraints.Create(ctx, createReq)

View File

@@ -119,9 +119,9 @@ var exportCmd = &cobra.Command{
   If path does not exist, this call returns an error
   RESOURCE_DOES_NOT_EXIST.

-  One can only export a directory in DBC format. If the exported data would
-  exceed size limit, this call returns MAX_NOTEBOOK_SIZE_EXCEEDED. Currently,
-  this API does not support exporting a library.`,
+  If the exported data would exceed size limit, this call returns
+  MAX_NOTEBOOK_SIZE_EXCEEDED. Currently, this API does not support exporting a
+  library.`,

 	Annotations: map[string]string{},
 	PreRunE:     root.MustWorkspaceClient,
@@ -286,7 +286,7 @@ func init() {
 	// TODO: short flags
 	listCmd.Flags().Var(&listJson, "json", `either inline JSON string or @path/to/file.json with request body`)

-	listCmd.Flags().IntVar(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `<content needed>.`)
+	listCmd.Flags().IntVar(&listReq.NotebooksModifiedAfter, "notebooks-modified-after", listReq.NotebooksModifiedAfter, `UTC timestamp in milliseconds.`)
 }
@@ -295,7 +295,7 @@ var listCmd = &cobra.Command{
 	Short: `List contents.`,
 	Long: `List contents.

-  Lists the contents of a directory, or the object if it is not a directory.If
+  Lists the contents of a directory, or the object if it is not a directory. If
   the input path does not exist, this call returns an error
   RESOURCE_DOES_NOT_EXIST.`,
@@ -353,7 +353,7 @@ var mkdirsCmd = &cobra.Command{
   path, this call returns an error RESOURCE_ALREADY_EXISTS.

   Note that if this operation fails it may have succeeded in creating some of
-  the necessary parrent directories.`,
+  the necessary parent directories.`,

 	Annotations: map[string]string{},
 	PreRunE:     root.MustWorkspaceClient,

internal/alerts_test.go (new file, 14 lines)
View File

@@ -0,0 +1,14 @@
package internal

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

func TestAccAlertsCreateErrWhenNoArguments(t *testing.T) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	_, _, err := RequireErrorRun(t, "alerts", "create")
	assert.Equal(t, "provide command input in JSON format by specifying --json option", err.Error())
}

internal/fs_cp_test.go (new file, 296 lines)
View File

@@ -0,0 +1,296 @@
package internal

import (
	"context"
	"fmt"
	"io"
	"path"
	"path/filepath"
	"strings"
	"testing"

	"github.com/databricks/cli/libs/filer"
	"github.com/databricks/databricks-sdk-go"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func setupSourceDir(t *testing.T, ctx context.Context, f filer.Filer) {
	var err error

	err = f.Write(ctx, "pyNb.py", strings.NewReader("# Databricks notebook source\nprint(123)"))
	require.NoError(t, err)

	err = f.Write(ctx, "query.sql", strings.NewReader("SELECT 1"))
	require.NoError(t, err)

	err = f.Write(ctx, "a/b/c/hello.txt", strings.NewReader("hello, world\n"), filer.CreateParentDirectories)
	require.NoError(t, err)
}

func setupSourceFile(t *testing.T, ctx context.Context, f filer.Filer) {
	err := f.Write(ctx, "foo.txt", strings.NewReader("abc"))
	require.NoError(t, err)
}

func assertTargetFile(t *testing.T, ctx context.Context, f filer.Filer, relPath string) {
	var err error

	r, err := f.Read(ctx, relPath)
	assert.NoError(t, err)
	defer r.Close()
	b, err := io.ReadAll(r)
	require.NoError(t, err)
	assert.Equal(t, "abc", string(b))
}

func assertFileContent(t *testing.T, ctx context.Context, f filer.Filer, path, expectedContent string) {
	r, err := f.Read(ctx, path)
	require.NoError(t, err)
	defer r.Close()
	b, err := io.ReadAll(r)
	require.NoError(t, err)
	assert.Equal(t, expectedContent, string(b))
}

func assertTargetDir(t *testing.T, ctx context.Context, f filer.Filer) {
	assertFileContent(t, ctx, f, "pyNb.py", "# Databricks notebook source\nprint(123)")
	assertFileContent(t, ctx, f, "query.sql", "SELECT 1")
	assertFileContent(t, ctx, f, "a/b/c/hello.txt", "hello, world\n")
}

func setupLocalFiler(t *testing.T) (filer.Filer, string) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	tmp := t.TempDir()
	f, err := filer.NewLocalClient(tmp)
	require.NoError(t, err)

	return f, path.Join("file:/", filepath.ToSlash(tmp))
}

func setupDbfsFiler(t *testing.T) (filer.Filer, string) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	w, err := databricks.NewWorkspaceClient()
	require.NoError(t, err)

	tmpDir := temporaryDbfsDir(t, w)
	f, err := filer.NewDbfsClient(w, tmpDir)
	require.NoError(t, err)

	return f, path.Join("dbfs:/", tmpDir)
}

type cpTest struct {
	setupSource func(*testing.T) (filer.Filer, string)
	setupTarget func(*testing.T) (filer.Filer, string)
}

func setupTable() []cpTest {
	return []cpTest{
		{setupSource: setupLocalFiler, setupTarget: setupLocalFiler},
		{setupSource: setupLocalFiler, setupTarget: setupDbfsFiler},
		{setupSource: setupDbfsFiler, setupTarget: setupLocalFiler},
		{setupSource: setupDbfsFiler, setupTarget: setupDbfsFiler},
	}
}

func TestAccFsCpDir(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		RequireSuccessfulRun(t, "fs", "cp", "-r", sourceDir, targetDir)

		assertTargetDir(t, ctx, targetFiler)
	}
}

func TestAccFsCpFileToFile(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceFile(t, ctx, sourceFiler)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), path.Join(targetDir, "bar.txt"))

		assertTargetFile(t, ctx, targetFiler, "bar.txt")
	}
}

func TestAccFsCpFileToDir(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceFile(t, ctx, sourceFiler)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "foo.txt"), targetDir)

		assertTargetFile(t, ctx, targetFiler, "foo.txt")
	}
}

func TestAccFsCpDirToDirFileNotOverwritten(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive")
		assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten")
		assertFileContent(t, ctx, targetFiler, "query.sql", "SELECT 1")
		assertFileContent(t, ctx, targetFiler, "pyNb.py", "# Databricks notebook source\nprint(123)")
	}
}

func TestAccFsCpFileToDirFileNotOverwritten(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"))
		assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "this should not be overwritten")
	}
}

func TestAccFsCpFileToFileFileNotOverwritten(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this should not be overwritten"), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--recursive")
		assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "this should not be overwritten")
	}
}

func TestAccFsCpDirToDirWithOverwriteFlag(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten"), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", sourceDir, targetDir, "--recursive", "--overwrite")
		assertTargetDir(t, ctx, targetFiler)
	}
}

func TestAccFsCpFileToFileWithOverwriteFlag(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hola.txt", strings.NewReader("this will be overwritten. Such is life."), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c/hola.txt"), "--overwrite")
		assertFileContent(t, ctx, targetFiler, "a/b/c/hola.txt", "hello, world\n")
	}
}

func TestAccFsCpFileToDirWithOverwriteFlag(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "a/b/c/hello.txt", strings.NewReader("this will be overwritten :') "), filer.CreateParentDirectories)
		require.NoError(t, err)

		RequireSuccessfulRun(t, "fs", "cp", path.Join(sourceDir, "a/b/c/hello.txt"), path.Join(targetDir, "a/b/c"), "--recursive", "--overwrite")
		assertFileContent(t, ctx, targetFiler, "a/b/c/hello.txt", "hello, world\n")
	}
}

func TestAccFsCpErrorsWhenSourceIsDirWithoutRecursiveFlag(t *testing.T) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	w, err := databricks.NewWorkspaceClient()
	require.NoError(t, err)

	tmpDir := temporaryDbfsDir(t, w)

	_, _, err = RequireErrorRun(t, "fs", "cp", "dbfs:"+tmpDir, "dbfs:/tmp")
	assert.Equal(t, fmt.Sprintf("source path %s is a directory. Please specify the --recursive flag", strings.TrimPrefix(tmpDir, "/")), err.Error())
}

func TestAccFsCpErrorsOnNoScheme(t *testing.T) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	_, _, err := RequireErrorRun(t, "fs", "cp", "/a", "/b")
	assert.Equal(t, "no scheme specified for path /a. Please specify scheme \"dbfs\" or \"file\". Example: file:/foo/bar or file:/c:/foo/bar", err.Error())
}

func TestAccFsCpErrorsOnInvalidScheme(t *testing.T) {
	t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV"))

	_, _, err := RequireErrorRun(t, "fs", "cp", "dbfs:/a", "https:/b")
	assert.Equal(t, "unsupported scheme https specified for path https:/b. Please specify scheme \"dbfs\" or \"file\". Example: file:/foo/bar or file:/c:/foo/bar", err.Error())
}

func TestAccFsCpSourceIsDirectoryButTargetIsFile(t *testing.T) {
	ctx := context.Background()
	table := setupTable()

	for _, row := range table {
		sourceFiler, sourceDir := row.setupSource(t)
		targetFiler, targetDir := row.setupTarget(t)
		setupSourceDir(t, ctx, sourceFiler)

		// Write a conflicting file to target
		err := targetFiler.Write(ctx, "my_target", strings.NewReader("I'll block any attempts to recursively copy"), filer.CreateParentDirectories)
		require.NoError(t, err)

		_, _, err = RequireErrorRun(t, "fs", "cp", sourceDir, path.Join(targetDir, "my_target"), "--recursive", "--overwrite")
		assert.Error(t, err)
	}
}

View File

@@ -240,6 +240,7 @@ func NewCobraTestRunner(t *testing.T, args ...string) *cobraTestRunner {
 }

 func RequireSuccessfulRun(t *testing.T, args ...string) (bytes.Buffer, bytes.Buffer) {
+	t.Logf("run args: [%s]", strings.Join(args, ", "))
 	c := NewCobraTestRunner(t, args...)
 	stdout, stderr, err := c.Run()
 	require.NoError(t, err)

View File

@@ -4,6 +4,7 @@ import (
 	"context"
 	"encoding/json"
 	"fmt"
+	"io"
 	"io/fs"
 	"math/rand"
 	"sync"
@@ -114,18 +115,23 @@ func TestAccLock(t *testing.T) {
 		if i == indexOfActiveLocker {
 			continue
 		}
-		err := lockers[i].PutFile(ctx, "foo.json", []byte(`'{"surname":"Khan", "name":"Shah Rukh"}`))
+		err := lockers[i].Write(ctx, "foo.json", []byte(`'{"surname":"Khan", "name":"Shah Rukh"}`))
 		assert.ErrorContains(t, err, "failed to put file. deploy lock not held")
 	}

 	// active locker file write succeeds
-	err = lockers[indexOfActiveLocker].PutFile(ctx, "foo.json", []byte(`{"surname":"Khan", "name":"Shah Rukh"}`))
+	err = lockers[indexOfActiveLocker].Write(ctx, "foo.json", []byte(`{"surname":"Khan", "name":"Shah Rukh"}`))
 	assert.NoError(t, err)

-	// active locker file read succeeds with expected results
-	bytes, err := lockers[indexOfActiveLocker].GetRawJsonFileContent(ctx, "foo.json")
+	// read active locker file
+	r, err := lockers[indexOfActiveLocker].Read(ctx, "foo.json")
+	require.NoError(t, err)
+	b, err := io.ReadAll(r)
+	require.NoError(t, err)
+
+	// assert on active locker content
 	var res map[string]string
-	json.Unmarshal(bytes, &res)
+	json.Unmarshal(b, &res)
 	assert.NoError(t, err)
 	assert.Equal(t, "Khan", res["surname"])
 	assert.Equal(t, "Shah Rukh", res["name"])
@@ -135,7 +141,7 @@ func TestAccLock(t *testing.T) {
 		if i == indexOfActiveLocker {
 			continue
 		}
-		_, err = lockers[i].GetRawJsonFileContent(ctx, "foo.json")
+		_, err = lockers[i].Read(ctx, "foo.json")
 		assert.ErrorContains(t, err, "failed to get file. deploy lock not held")
 	}

View File

@@ -175,6 +175,5 @@ func (w *LocalClient) Stat(ctx context.Context, name string) (fs.FileInfo, error
 	if os.IsNotExist(err) {
 		return nil, FileDoesNotExistError{path: absPath}
 	}
-
 	return stat, err
 }

View File

@@ -7,7 +7,7 @@ import (
 	"errors"
 	"fmt"
 	"io"
-	"strings"
+	"io/fs"
 	"time"

 	"github.com/databricks/cli/libs/filer"
@@ -88,7 +88,7 @@ func (locker *Locker) GetActiveLockState(ctx context.Context) (*LockState, error
 // holder details if locker does not hold the lock
 func (locker *Locker) assertLockHeld(ctx context.Context) error {
 	activeLockState, err := locker.GetActiveLockState(ctx)
-	if err != nil && strings.Contains(err.Error(), "File not found.") {
+	if errors.Is(err, fs.ErrNotExist) {
 		return fmt.Errorf("no active lock on target dir: %s", err)
 	}
 	if err != nil {
@@ -104,22 +104,18 @@ func (locker *Locker) assertLockHeld(ctx context.Context) error {
 }

 // idempotent function since overwrite is set to true
-func (locker *Locker) PutFile(ctx context.Context, pathToFile string, content []byte) error {
+func (locker *Locker) Write(ctx context.Context, pathToFile string, content []byte) error {
 	if !locker.Active {
 		return fmt.Errorf("failed to put file. deploy lock not held")
 	}
 	return locker.filer.Write(ctx, pathToFile, bytes.NewReader(content), filer.OverwriteIfExists, filer.CreateParentDirectories)
 }

-func (locker *Locker) GetRawJsonFileContent(ctx context.Context, path string) ([]byte, error) {
+func (locker *Locker) Read(ctx context.Context, path string) (io.ReadCloser, error) {
 	if !locker.Active {
 		return nil, fmt.Errorf("failed to get file. deploy lock not held")
 	}
-	reader, err := locker.filer.Read(ctx, path)
-	if err != nil {
-		return nil, err
-	}
-	return io.ReadAll(reader)
+	return locker.filer.Read(ctx, path)
 }

 func (locker *Locker) Lock(ctx context.Context, isForced bool) error {
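
The renamed pair keeps Write symmetric with Read: Write still takes raw bytes, while Read now hands back the filer's io.ReadCloser and leaves io.ReadAll to the caller, as LoadTerraformState and the integration test above both do. A self-contained sketch of the caller-side pattern; the in-memory locker below is illustrative, not the real Locker (which also enforces lock ownership):

package main

import (
	"bytes"
	"context"
	"fmt"
	"io"
)

// readWriter captures just the two methods renamed in this commit.
type readWriter interface {
	Write(ctx context.Context, path string, content []byte) error
	Read(ctx context.Context, path string) (io.ReadCloser, error)
}

// memLocker is an illustrative in-memory stand-in.
type memLocker struct {
	files map[string][]byte
}

func (m *memLocker) Write(_ context.Context, path string, content []byte) error {
	m.files[path] = content
	return nil
}

func (m *memLocker) Read(_ context.Context, path string) (io.ReadCloser, error) {
	return io.NopCloser(bytes.NewReader(m.files[path])), nil
}

func main() {
	ctx := context.Background()
	var l readWriter = &memLocker{files: map[string][]byte{}}

	if err := l.Write(ctx, "foo.json", []byte(`{"name":"Shah Rukh"}`)); err != nil {
		panic(err)
	}

	// Read returns a stream; the caller does io.ReadAll itself.
	r, err := l.Read(ctx, "foo.json")
	if err != nil {
		panic(err)
	}
	defer r.Close()

	b, err := io.ReadAll(r)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"name":"Shah Rukh"}
}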