From 4a03265dc2af6ac39fa78674e2a7f6bc991c4d49 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 19 Jun 2023 12:31:07 +0200 Subject: [PATCH 1/6] Fix force flag not working for bundle destroy (#434) ## Changes `--force` flag did not exist for `bundle destroy`. This PR adds that in. ## Tests manually tested. Now adding the `--force` flag hijacks the deploy lock on the target directory. --- cmd/bundle/deploy.go | 6 +++--- cmd/bundle/destroy.go | 4 +++- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/cmd/bundle/deploy.go b/cmd/bundle/deploy.go index 5ecc72d29..7dee32da3 100644 --- a/cmd/bundle/deploy.go +++ b/cmd/bundle/deploy.go @@ -15,7 +15,7 @@ var deployCmd = &cobra.Command{ b := bundle.Get(cmd.Context()) // If `--force` is specified, force acquisition of the deployment lock. - b.Config.Bundle.Lock.Force = force + b.Config.Bundle.Lock.Force = forceDeploy return bundle.Apply(cmd.Context(), b, bundle.Seq( phases.Initialize(), @@ -25,9 +25,9 @@ var deployCmd = &cobra.Command{ }, } -var force bool +var forceDeploy bool func init() { AddCommand(deployCmd) - deployCmd.Flags().BoolVar(&force, "force", false, "Force acquisition of deployment lock.") + deployCmd.Flags().BoolVar(&forceDeploy, "force", false, "Force acquisition of deployment lock.") } diff --git a/cmd/bundle/destroy.go b/cmd/bundle/destroy.go index 979f01a9d..d0fe699a0 100644 --- a/cmd/bundle/destroy.go +++ b/cmd/bundle/destroy.go @@ -22,7 +22,7 @@ var destroyCmd = &cobra.Command{ b := bundle.Get(ctx) // If `--force` is specified, force acquisition of the deployment lock. - b.Config.Bundle.Lock.Force = force + b.Config.Bundle.Lock.Force = forceDestroy // If `--auto-approve`` is specified, we skip confirmation checks b.AutoApprove = autoApprove @@ -51,8 +51,10 @@ var destroyCmd = &cobra.Command{ } var autoApprove bool +var forceDestroy bool func init() { AddCommand(destroyCmd) destroyCmd.Flags().BoolVar(&autoApprove, "auto-approve", false, "Skip interactive approvals for deleting resources and files") + destroyCmd.Flags().BoolVar(&forceDestroy, "force", false, "Force acquisition of deployment lock.") } From 5d036ab6b8295a607867fe565579442ae390cca6 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Mon, 19 Jun 2023 15:57:25 +0200 Subject: [PATCH 2/6] Fix locker unlock for destroy (#492) ## Changes Adds ability for allowing unlock to succeed even if the deploy file is missing. 
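In short, `lock.Release` now takes a goal, and only the destroy goal tolerates a missing lock file. A simplified excerpt of the new behavior (condensed from the `release.Apply` change in `bundle/deploy/lock/release.go` below; `m`, `b`, and `ctx` are as in that method):

```go
switch m.goal {
case GoalDeploy:
	// Deploying: deploy.lock is expected to still exist, so a missing
	// lock file remains an error.
	return b.Locker.Unlock(ctx)
case GoalDestroy:
	// Destroying: the lock file may already have been deleted by
	// files.Delete() before the lock is released, so tolerate a missing file.
	return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist)
default:
	return fmt.Errorf("unknown goal for lock release: %s", m.goal)
}
```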
## Tests Using integration tests and manually --- bundle/deploy/lock/release.go | 30 +++++++++++----- bundle/deployer/deployer.go | 1 + bundle/phases/deploy.go | 2 +- bundle/phases/destroy.go | 2 +- internal/locker_test.go | 66 +++++++++++++++++++++++++++++++++++ libs/locker/locker.go | 36 +++++++++++++------ 6 files changed, 115 insertions(+), 22 deletions(-) diff --git a/bundle/deploy/lock/release.go b/bundle/deploy/lock/release.go index 9b6976fe0..52d271943 100644 --- a/bundle/deploy/lock/release.go +++ b/bundle/deploy/lock/release.go @@ -2,15 +2,26 @@ package lock import ( "context" + "fmt" "github.com/databricks/cli/bundle" + "github.com/databricks/cli/libs/locker" "github.com/databricks/cli/libs/log" ) -type release struct{} +type Goal string -func Release() bundle.Mutator { - return &release{} +const ( + GoalDeploy = Goal("deploy") + GoalDestroy = Goal("destroy") +) + +type release struct { + goal Goal +} + +func Release(goal Goal) bundle.Mutator { + return &release{goal} } func (m *release) Name() string { @@ -32,11 +43,12 @@ func (m *release) Apply(ctx context.Context, b *bundle.Bundle) error { } log.Infof(ctx, "Releasing deployment lock") - err := b.Locker.Unlock(ctx) - if err != nil { - log.Errorf(ctx, "Failed to release deployment lock: %v", err) - return err + switch m.goal { + case GoalDeploy: + return b.Locker.Unlock(ctx) + case GoalDestroy: + return b.Locker.Unlock(ctx, locker.AllowLockFileNotExist) + default: + return fmt.Errorf("unknown goal for lock release: %s", m.goal) } - - return nil } diff --git a/bundle/deployer/deployer.go b/bundle/deployer/deployer.go index 1d780f721..f4f718975 100644 --- a/bundle/deployer/deployer.go +++ b/bundle/deployer/deployer.go @@ -108,6 +108,7 @@ func (d *Deployer) LoadTerraformState(ctx context.Context) error { if err != nil { return err } + defer r.Close() err = os.MkdirAll(d.DefaultTerraformRoot(), os.ModeDir) if err != nil { return err diff --git a/bundle/phases/deploy.go b/bundle/phases/deploy.go index 050b5e561..f2692ea9b 100644 --- a/bundle/phases/deploy.go +++ b/bundle/phases/deploy.go @@ -22,7 +22,7 @@ func Deploy() bundle.Mutator { terraform.Apply(), terraform.StatePush(), ), - lock.Release(), + lock.Release(lock.GoalDeploy), ), ) diff --git a/bundle/phases/destroy.go b/bundle/phases/destroy.go index 5aaccdd27..b0ed5d627 100644 --- a/bundle/phases/destroy.go +++ b/bundle/phases/destroy.go @@ -20,7 +20,7 @@ func Destroy() bundle.Mutator { terraform.StatePush(), files.Delete(), ), - lock.Release(), + lock.Release(lock.GoalDestroy), ), ) diff --git a/internal/locker_test.go b/internal/locker_test.go index f3e026d62..2c7e7aa8e 100644 --- a/internal/locker_test.go +++ b/internal/locker_test.go @@ -11,6 +11,7 @@ import ( "testing" "time" + "github.com/databricks/cli/libs/filer" lockpkg "github.com/databricks/cli/libs/locker" "github.com/databricks/databricks-sdk-go" "github.com/databricks/databricks-sdk-go/service/workspace" @@ -126,6 +127,7 @@ func TestAccLock(t *testing.T) { // read active locker file r, err := lockers[indexOfActiveLocker].Read(ctx, "foo.json") require.NoError(t, err) + defer r.Close() b, err := io.ReadAll(r) require.NoError(t, err) @@ -159,3 +161,67 @@ func TestAccLock(t *testing.T) { assert.NoError(t, err) assert.True(t, lockers[indexOfAnInactiveLocker].Active) } + +func setupLockerTest(ctx context.Context, t *testing.T) (*lockpkg.Locker, filer.Filer) { + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + w, err := databricks.NewWorkspaceClient() + require.NoError(t, err) + + // create temp wsfs dir + tmpDir := 
temporaryWorkspaceDir(t, w) + f, err := filer.NewWorkspaceFilesClient(w, tmpDir) + require.NoError(t, err) + + // create locker + locker, err := lockpkg.CreateLocker("redfoo@databricks.com", tmpDir, w) + require.NoError(t, err) + + return locker, f +} + +func TestAccLockUnlockWithoutAllowsLockFileNotExist(t *testing.T) { + ctx := context.Background() + locker, f := setupLockerTest(ctx, t) + var err error + + // Acquire lock on tmp directory + err = locker.Lock(ctx, false) + require.NoError(t, err) + + // Assert lock file is created + _, err = f.Stat(ctx, "deploy.lock") + assert.NoError(t, err) + + // Manually delete lock file + err = f.Delete(ctx, "deploy.lock") + assert.NoError(t, err) + + // Assert error, because lock file does not exist + err = locker.Unlock(ctx) + assert.ErrorIs(t, err, fs.ErrNotExist) +} + +func TestAccLockUnlockWithAllowsLockFileNotExist(t *testing.T) { + ctx := context.Background() + locker, f := setupLockerTest(ctx, t) + var err error + + // Acquire lock on tmp directory + err = locker.Lock(ctx, false) + require.NoError(t, err) + assert.True(t, locker.Active) + + // Assert lock file is created + _, err = f.Stat(ctx, "deploy.lock") + assert.NoError(t, err) + + // Manually delete lock file + err = f.Delete(ctx, "deploy.lock") + assert.NoError(t, err) + + // Assert error, because lock file does not exist + err = locker.Unlock(ctx, lockpkg.AllowLockFileNotExist) + assert.NoError(t, err) + assert.False(t, locker.Active) +} diff --git a/libs/locker/locker.go b/libs/locker/locker.go index 7c23f40e7..bb95b7840 100644 --- a/libs/locker/locker.go +++ b/libs/locker/locker.go @@ -13,8 +13,17 @@ import ( "github.com/databricks/cli/libs/filer" "github.com/databricks/databricks-sdk-go" "github.com/google/uuid" + "golang.org/x/exp/slices" ) +type UnlockOption int + +const ( + AllowLockFileNotExist UnlockOption = iota +) + +const LockFileName = "deploy.lock" + // Locker object enables exclusive access to TargetDir's scope for a client. This // enables multiple clients to deploy to the same scope (ie TargetDir) in an atomic // manner @@ -65,10 +74,11 @@ type LockState struct { // GetActiveLockState returns current lock state, irrespective of us holding it. func (locker *Locker) GetActiveLockState(ctx context.Context) (*LockState, error) { - reader, err := locker.filer.Read(ctx, locker.RemotePath()) + reader, err := locker.filer.Read(ctx, LockFileName) if err != nil { return nil, err } + defer reader.Close() bytes, err := io.ReadAll(reader) if err != nil { @@ -89,7 +99,7 @@ func (locker *Locker) GetActiveLockState(ctx context.Context) (*LockState, error func (locker *Locker) assertLockHeld(ctx context.Context) error { activeLockState, err := locker.GetActiveLockState(ctx) if errors.Is(err, fs.ErrNotExist) { - return fmt.Errorf("no active lock on target dir: %s", err) + return fmt.Errorf("no active lock on target dir: %w", err) } if err != nil { return err @@ -140,7 +150,7 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { modes = append(modes, filer.OverwriteIfExists) } - err = locker.filer.Write(ctx, locker.RemotePath(), bytes.NewReader(buf), modes...) + err = locker.filer.Write(ctx, LockFileName, bytes.NewReader(buf), modes...) if err != nil { // If the write failed because the lock file already exists, don't return // the error and instead fall through to [assertLockHeld] below. 
@@ -161,15 +171,24 @@ func (locker *Locker) Lock(ctx context.Context, isForced bool) error { return nil } -func (locker *Locker) Unlock(ctx context.Context) error { +func (locker *Locker) Unlock(ctx context.Context, opts ...UnlockOption) error { if !locker.Active { return fmt.Errorf("unlock called when lock is not held") } + + // if allowLockFileNotExist is set, do not throw an error if the lock file does + // not exist. This is helpful when destroying a bundle in which case the lock + // file will be deleted before we have a chance to unlock + if _, err := locker.filer.Stat(ctx, LockFileName); errors.Is(err, fs.ErrNotExist) && slices.Contains(opts, AllowLockFileNotExist) { + locker.Active = false + return nil + } + err := locker.assertLockHeld(ctx) if err != nil { - return fmt.Errorf("unlock called when lock is not held: %s", err) + return fmt.Errorf("unlock called when lock is not held: %w", err) } - err = locker.filer.Delete(ctx, locker.RemotePath()) + err = locker.filer.Delete(ctx, LockFileName) if err != nil { return err } @@ -177,11 +196,6 @@ func (locker *Locker) Unlock(ctx context.Context) error { return nil } -func (locker *Locker) RemotePath() string { - // Note: remote paths are scoped to `targetDir`. Also see [CreateLocker]. - return "deploy.lock" -} - func CreateLocker(user string, targetDir string, w *databricks.WorkspaceClient) (*Locker, error) { filer, err := filer.NewWorkspaceFilesClient(w, targetDir) if err != nil { From e19eaca4d19034181cac3e8baf3b4131621fa6d0 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Mon, 19 Jun 2023 20:29:13 +0200 Subject: [PATCH 3/6] Add filer.Filer implementation backed by the Files API (#474) ## Tests New integration test for the read/write parts of the other filers. The integration test cannot be shared just yet because the Files API doesn't include support for creating/listing/removing directories yet. --- cmd/fs/helpers.go | 5 + internal/filer_test.go | 124 ++++++++++++++++++- libs/filer/files_client.go | 241 +++++++++++++++++++++++++++++++++++++ 3 files changed, 368 insertions(+), 2 deletions(-) create mode 100644 libs/filer/files_client.go diff --git a/cmd/fs/helpers.go b/cmd/fs/helpers.go index 72193d9f7..aecee9e08 100644 --- a/cmd/fs/helpers.go +++ b/cmd/fs/helpers.go @@ -28,6 +28,11 @@ func filerForPath(ctx context.Context, fullPath string) (filer.Filer, string, er switch scheme { case DbfsScheme: w := root.WorkspaceClient(ctx) + // If the specified path has the "Volumes" prefix, use the Files API. + if strings.HasPrefix(path, "Volumes/") { + f, err := filer.NewFilesClient(w, "/") + return f, path, err + } f, err := filer.NewDbfsClient(w, "/") return f, path, err diff --git a/internal/filer_test.go b/internal/filer_test.go index bc005feb1..0e126abc1 100644 --- a/internal/filer_test.go +++ b/internal/filer_test.go @@ -82,7 +82,7 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.NoError(t, err) // Stat on a directory should succeed. - // Note: size and modification time behave differently between WSFS and DBFS. + // Note: size and modification time behave differently between backends. info, err := f.Stat(ctx, "/foo") require.NoError(t, err) assert.Equal(t, "foo", info.Name()) @@ -90,7 +90,7 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) { assert.Equal(t, true, info.IsDir()) // Stat on a file should succeed. - // Note: size and modification time behave differently between WSFS and DBFS. + // Note: size and modification time behave differently between backends. 
info, err = f.Stat(ctx, "/foo/bar") require.NoError(t, err) assert.Equal(t, "bar", info.Name()) @@ -465,3 +465,123 @@ func TestAccFilerLocalReadDir(t *testing.T) { ctx, f := setupFilerLocalTest(t) runFilerReadDirTest(t, ctx, f) } + +func temporaryVolumeDir(t *testing.T, w *databricks.WorkspaceClient) string { + // Assume this test is run against the internal testing workspace. + path := RandomName("/Volumes/bogdanghita/default/v3_shared/cli-testing/integration-test-filer-") + + // The Files API doesn't include support for creating and removing directories yet. + // Directories are created implicitly by writing a file to a path that doesn't exist. + // We therefore assume we can use the specified path without creating it first. + t.Logf("using dbfs:%s", path) + + return path +} + +func setupFilerFilesApiTest(t *testing.T) (context.Context, filer.Filer) { + t.SkipNow() // until available on prod + t.Log(GetEnvOrSkipTest(t, "CLOUD_ENV")) + + ctx := context.Background() + w := databricks.Must(databricks.NewWorkspaceClient()) + tmpdir := temporaryVolumeDir(t, w) + f, err := filer.NewFilesClient(w, tmpdir) + require.NoError(t, err) + return ctx, f +} + +func TestAccFilerFilesApiReadWrite(t *testing.T) { + ctx, f := setupFilerFilesApiTest(t) + + // The Files API doesn't know about directories yet. + // Below is a copy of [runFilerReadWriteTest] with + // assertions that don't work commented out. + + var err error + + // Write should fail because the root path doesn't yet exist. + // err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`)) + // assert.True(t, errors.As(err, &filer.NoSuchDirectoryError{})) + // assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Read should fail because the root path doesn't yet exist. + _, err = f.Read(ctx, "/foo/bar") + assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Read should fail because the path points to a directory + // err = f.Mkdir(ctx, "/dir") + // require.NoError(t, err) + // _, err = f.Read(ctx, "/dir") + // assert.ErrorIs(t, err, fs.ErrInvalid) + + // Write with CreateParentDirectories flag should succeed. + err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello world`), filer.CreateParentDirectories) + assert.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello world`) + + // Write should fail because there is an existing file at the specified path. + err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`)) + assert.True(t, errors.As(err, &filer.FileAlreadyExistsError{})) + assert.True(t, errors.Is(err, fs.ErrExist)) + + // Write with OverwriteIfExists should succeed. + err = f.Write(ctx, "/foo/bar", strings.NewReader(`hello universe`), filer.OverwriteIfExists) + assert.NoError(t, err) + filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello universe`) + + // Write should succeed if there is no existing file at the specified path. + err = f.Write(ctx, "/foo/qux", strings.NewReader(`hello universe`)) + assert.NoError(t, err) + + // Stat on a directory should succeed. + // Note: size and modification time behave differently between backends. + info, err := f.Stat(ctx, "/foo") + require.NoError(t, err) + assert.Equal(t, "foo", info.Name()) + assert.True(t, info.Mode().IsDir()) + assert.Equal(t, true, info.IsDir()) + + // Stat on a file should succeed. + // Note: size and modification time behave differently between backends. 
+ info, err = f.Stat(ctx, "/foo/bar") + require.NoError(t, err) + assert.Equal(t, "bar", info.Name()) + assert.True(t, info.Mode().IsRegular()) + assert.Equal(t, false, info.IsDir()) + + // Delete should fail if the file doesn't exist. + err = f.Delete(ctx, "/doesnt_exist") + assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Stat should fail if the file doesn't exist. + _, err = f.Stat(ctx, "/doesnt_exist") + assert.True(t, errors.As(err, &filer.FileDoesNotExistError{})) + assert.True(t, errors.Is(err, fs.ErrNotExist)) + + // Delete should succeed for file that does exist. + err = f.Delete(ctx, "/foo/bar") + assert.NoError(t, err) + + // Delete should fail for a non-empty directory. + err = f.Delete(ctx, "/foo") + assert.True(t, errors.As(err, &filer.DirectoryNotEmptyError{})) + assert.True(t, errors.Is(err, fs.ErrInvalid)) + + // Delete should succeed for a non-empty directory if the DeleteRecursively flag is set. + // err = f.Delete(ctx, "/foo", filer.DeleteRecursively) + // assert.NoError(t, err) + + // Delete of the filer root should ALWAYS fail, otherwise subsequent writes would fail. + // It is not in the filer's purview to delete its root directory. + err = f.Delete(ctx, "/") + assert.True(t, errors.As(err, &filer.CannotDeleteRootError{})) + assert.True(t, errors.Is(err, fs.ErrInvalid)) +} + +func TestAccFilerFilesApiReadDir(t *testing.T) { + t.Skipf("no support for ReadDir yet") + ctx, f := setupFilerFilesApiTest(t) + runFilerReadDirTest(t, ctx, f) +} diff --git a/libs/filer/files_client.go b/libs/filer/files_client.go new file mode 100644 index 000000000..6c1f5a97e --- /dev/null +++ b/libs/filer/files_client.go @@ -0,0 +1,241 @@ +package filer + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "io/fs" + "net/http" + "net/url" + "path" + "strings" + "time" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/client" + "golang.org/x/exp/slices" +) + +// Type that implements fs.FileInfo for the Files API. +type filesApiFileInfo struct { + absPath string + isDir bool +} + +func (info filesApiFileInfo) Name() string { + return path.Base(info.absPath) +} + +func (info filesApiFileInfo) Size() int64 { + // No way to get the file size in the Files API. + return 0 +} + +func (info filesApiFileInfo) Mode() fs.FileMode { + mode := fs.ModePerm + if info.isDir { + mode |= fs.ModeDir + } + return mode +} + +func (info filesApiFileInfo) ModTime() time.Time { + return time.Time{} +} + +func (info filesApiFileInfo) IsDir() bool { + return info.isDir +} + +func (info filesApiFileInfo) Sys() any { + return nil +} + +// FilesClient implements the [Filer] interface for the Files API backend. +type FilesClient struct { + workspaceClient *databricks.WorkspaceClient + apiClient *client.DatabricksClient + + // File operations will be relative to this path. 
+ root RootPath +} + +func filesNotImplementedError(fn string) error { + return fmt.Errorf("filer.%s is not implemented for the Files API", fn) +} + +func NewFilesClient(w *databricks.WorkspaceClient, root string) (Filer, error) { + apiClient, err := client.New(w.Config) + if err != nil { + return nil, err + } + + return &FilesClient{ + workspaceClient: w, + apiClient: apiClient, + + root: NewRootPath(root), + }, nil +} + +func (w *FilesClient) urlPath(name string) (string, string, error) { + absPath, err := w.root.Join(name) + if err != nil { + return "", "", err + } + + // The user specified part of the path must be escaped. + urlPath := fmt.Sprintf( + "/api/2.0/fs/files/%s", + url.PathEscape(strings.TrimLeft(absPath, "/")), + ) + + return absPath, urlPath, nil +} + +func (w *FilesClient) Write(ctx context.Context, name string, reader io.Reader, mode ...WriteMode) error { + absPath, urlPath, err := w.urlPath(name) + if err != nil { + return err + } + + overwrite := slices.Contains(mode, OverwriteIfExists) + urlPath = fmt.Sprintf("%s?overwrite=%t", urlPath, overwrite) + err = w.apiClient.Do(ctx, http.MethodPut, urlPath, reader, nil, + func(r *http.Request) error { + r.Header.Set("Content-Type", "application/octet-stream") + return nil + }) + + // Return early on success. + if err == nil { + return nil + } + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return err + } + + // This API returns 409 if the file already exists, when the object type is file + if aerr.StatusCode == http.StatusConflict { + return FileAlreadyExistsError{absPath} + } + + return err +} + +func (w *FilesClient) Read(ctx context.Context, name string) (io.ReadCloser, error) { + absPath, urlPath, err := w.urlPath(name) + if err != nil { + return nil, err + } + + var buf bytes.Buffer + err = w.apiClient.Do(ctx, http.MethodGet, urlPath, nil, &buf) + + // Return early on success. + if err == nil { + return io.NopCloser(&buf), nil + } + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return nil, err + } + + // This API returns a 404 if the specified path does not exist. + if aerr.StatusCode == http.StatusNotFound { + return nil, FileDoesNotExistError{absPath} + } + + return nil, err +} + +func (w *FilesClient) Delete(ctx context.Context, name string, mode ...DeleteMode) error { + absPath, urlPath, err := w.urlPath(name) + if err != nil { + return err + } + + // Illegal to delete the root path. + if absPath == w.root.rootPath { + return CannotDeleteRootError{} + } + + err = w.apiClient.Do(ctx, http.MethodDelete, urlPath, nil, nil) + + // Return early on success. + if err == nil { + return nil + } + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return err + } + + // This API returns a 404 if the specified path does not exist. + if aerr.StatusCode == http.StatusNotFound { + return FileDoesNotExistError{absPath} + } + + // This API returns 409 if the underlying path is a directory. + if aerr.StatusCode == http.StatusConflict { + return DirectoryNotEmptyError{absPath} + } + + return err +} + +func (w *FilesClient) ReadDir(ctx context.Context, name string) ([]fs.DirEntry, error) { + return nil, filesNotImplementedError("ReadDir") +} + +func (w *FilesClient) Mkdir(ctx context.Context, name string) error { + // Directories are created implicitly. + // No need to do anything. 
+ return nil +} + +func (w *FilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) { + absPath, urlPath, err := w.urlPath(name) + if err != nil { + return nil, err + } + + err = w.apiClient.Do(ctx, http.MethodHead, urlPath, nil, nil, + func(r *http.Request) error { + r.Header.Del("Content-Type") + return nil + }) + + // If the HEAD requests succeeds, the file exists. + if err == nil { + return filesApiFileInfo{absPath: absPath, isDir: false}, nil + } + + // Special handling of this error only if it is an API error. + var aerr *apierr.APIError + if !errors.As(err, &aerr) { + return nil, err + } + + // This API returns a 404 if the specified path does not exist. + if aerr.StatusCode == http.StatusNotFound { + return nil, FileDoesNotExistError{absPath} + } + + // This API returns 409 if the underlying path is a directory. + if aerr.StatusCode == http.StatusConflict { + return filesApiFileInfo{absPath: absPath, isDir: true}, nil + } + + return nil, err +} From 8804b9d8ba5a93234248d7f1fd892785158c4009 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 06:26:41 +0000 Subject: [PATCH 4/6] Bump golang.org/x/sync from 0.2.0 to 0.3.0 (#495) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 7f38a0c02..78706aa27 100644 --- a/go.mod +++ b/go.mod @@ -25,7 +25,7 @@ require ( golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 golang.org/x/mod v0.10.0 golang.org/x/oauth2 v0.9.0 - golang.org/x/sync v0.2.0 + golang.org/x/sync v0.3.0 golang.org/x/term v0.9.0 golang.org/x/text v0.10.0 gopkg.in/ini.v1 v1.67.0 // Apache 2.0 diff --git a/go.sum b/go.sum index 7db9721f8..d9f7e5d01 100644 --- a/go.sum +++ b/go.sum @@ -198,8 +198,8 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.2.0 h1:PUR+T4wwASmuSTYdKjYHI5TD22Wy5ogLU5qZCOLxBrI= -golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= +golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= From d671516b3936f3e68a00025e421cacc46fb0a8fc Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Tue, 20 Jun 2023 06:37:55 +0000 Subject: [PATCH 5/6] Bump golang.org/x/mod from 0.10.0 to 0.11.0 (#496) --- go.mod | 2 +- go.sum | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/go.mod b/go.mod index 78706aa27..afec49331 100644 --- a/go.mod +++ b/go.mod @@ -23,7 +23,7 @@ require ( github.com/stretchr/testify v1.8.4 // MIT github.com/whilp/git-urls v1.0.0 // MIT golang.org/x/exp v0.0.0-20230310171629-522b1b587ee0 - golang.org/x/mod v0.10.0 + golang.org/x/mod v0.11.0 golang.org/x/oauth2 v0.9.0 golang.org/x/sync v0.3.0 golang.org/x/term v0.9.0 
diff --git a/go.sum b/go.sum index d9f7e5d01..70e492268 100644 --- a/go.sum +++ b/go.sum @@ -172,8 +172,8 @@ golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTk golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.10.0 h1:lFO9qtOdlre5W1jxS3r/4szv2/6iXxScdzjoBMXNhYk= -golang.org/x/mod v0.10.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= +golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= From 63e80a0b7120e0ac242e474e2dc7bd4d9d956727 Mon Sep 17 00:00:00 2001 From: Pieter Noordhuis Date: Tue, 20 Jun 2023 10:22:50 +0200 Subject: [PATCH 6/6] Release v0.200.0 (#499) ## Changes This version marks the first version available as public preview. The minor bump to 200 better disambiguates between Databricks CLI "v1" (the Python version) and this version, Databricks CLI "v2". The minor version of 0.100 may look lower than 0.17 to some, whereas 200 does not. This bump has no other significance. CLI: * Add filer.Filer implementation backed by the Files API ([#474](https://github.com/databricks/cli/pull/474)). * Add fs cp command ([#463](https://github.com/databricks/cli/pull/463)). * Correctly set ExactArgs if generated command has positional arguments ([#488](https://github.com/databricks/cli/pull/488)). * Do not use white color as string output ([#489](https://github.com/databricks/cli/pull/489)). * Update README to reflect public preview status ([#491](https://github.com/databricks/cli/pull/491)). Bundles: * Fix force flag not working for bundle destroy ([#434](https://github.com/databricks/cli/pull/434)). * Fix locker unlock for destroy ([#492](https://github.com/databricks/cli/pull/492)). * Use better error assertions and clean up locker API ([#490](https://github.com/databricks/cli/pull/490)). Dependencies: * Bump golang.org/x/mod from 0.10.0 to 0.11.0 ([#496](https://github.com/databricks/cli/pull/496)). * Bump golang.org/x/sync from 0.2.0 to 0.3.0 ([#495](https://github.com/databricks/cli/pull/495)). --- CHANGELOG.md | 24 ++++++++++++++++++++++++ 1 file changed, 24 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index 534c3bca9..e7675f7c3 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,29 @@ # Version changelog +## 0.200.0 + +This version marks the first version available as public preview. + +The minor bump to 200 better disambiguates between Databricks CLI "v1" (the Python version) +and this version, Databricks CLI "v2". The minor version of 0.100 may look lower than 0.17 +to some, whereas 200 does not. This bump has no other significance. + +CLI: + * Add filer.Filer implementation backed by the Files API ([#474](https://github.com/databricks/cli/pull/474)). + * Add fs cp command ([#463](https://github.com/databricks/cli/pull/463)). 
+ * Correctly set ExactArgs if generated command has positional arguments ([#488](https://github.com/databricks/cli/pull/488)). + * Do not use white color as string output ([#489](https://github.com/databricks/cli/pull/489)). + * Update README to reflect public preview status ([#491](https://github.com/databricks/cli/pull/491)). + +Bundles: + * Fix force flag not working for bundle destroy ([#434](https://github.com/databricks/cli/pull/434)). + * Fix locker unlock for destroy ([#492](https://github.com/databricks/cli/pull/492)). + * Use better error assertions and clean up locker API ([#490](https://github.com/databricks/cli/pull/490)). + +Dependencies: + * Bump golang.org/x/mod from 0.10.0 to 0.11.0 ([#496](https://github.com/databricks/cli/pull/496)). + * Bump golang.org/x/sync from 0.2.0 to 0.3.0 ([#495](https://github.com/databricks/cli/pull/495)). + ## 0.100.4 CLI: