Merge remote-tracking branch 'origin' into import_dir

This commit is contained in:
Shreyas Goenka 2023-06-02 00:18:12 +02:00
commit 709987448a
No known key found for this signature in database
GPG Key ID: 92A07DF49CCB0622
8 changed files with 180 additions and 29 deletions

View File

@ -3,10 +3,13 @@ package root
import (
"context"
"fmt"
"io"
"os"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/cli/libs/log"
"github.com/fatih/color"
"golang.org/x/exp/slog"
)
@ -16,6 +19,72 @@ const (
envLogFormat = "DATABRICKS_LOG_FORMAT"
)
// friendlyHandler wraps another slog.Handler and renders records as short,
// colorized single-line messages intended for interactive terminals.
// Handler methods not overridden here (Enabled, WithAttrs, WithGroup)
// are delegated to the embedded handler.
type friendlyHandler struct {
	slog.Handler
	// w is the terminal writer that Handle emits formatted lines to.
	w io.Writer
}
// Pre-rendered, colorized labels for each log level.
// INFO and WARN are right-padded to five characters ("%5s") so that
// level columns line up with the five-character TRACE/DEBUG/ERROR labels.
var (
	levelTrace = color.New(color.FgYellow).Sprint("TRACE")
	levelDebug = color.New(color.FgYellow).Sprint("DEBUG")
	levelInfo  = color.New(color.FgGreen).Sprintf("%5s", "INFO")
	levelWarn  = color.New(color.FgMagenta).Sprintf("%5s", "WARN")
	levelError = color.New(color.FgRed).Sprint("ERROR")
)
// coloredLevel maps a record's level to its pre-rendered colorized label.
// Levels without a dedicated label render as an empty string.
func (l *friendlyHandler) coloredLevel(rec slog.Record) string {
	switch {
	case rec.Level == log.LevelTrace:
		return levelTrace
	case rec.Level == slog.LevelDebug:
		return levelDebug
	case rec.Level == slog.LevelInfo:
		return levelInfo
	case rec.Level == slog.LevelWarn:
		return levelWarn
	case rec.Level == log.LevelError:
		return levelError
	default:
		return ""
	}
}
// Handle renders a record as a single colorized line of the form
// "HH:MM LEVEL message key=value ..." and writes it to the handler's writer.
func (l *friendlyHandler) Handle(ctx context.Context, rec slog.Record) error {
	// Short timestamp: hour and minute only.
	stamp := color.MagentaString(fmt.Sprintf("%02d:%02d", rec.Time.Hour(), rec.Time.Minute()))

	// Collect attributes as " key=value" pairs with per-part coloring.
	var attrs string
	rec.Attrs(func(a slog.Attr) {
		attrs += fmt.Sprintf(" %s%s%s",
			color.CyanString(a.Key),
			color.CyanString("="),
			color.YellowString(a.Value.String()))
	})

	line := fmt.Sprintf("%s %s %s%s\n",
		stamp,
		l.coloredLevel(rec),
		color.HiWhiteString(rec.Message),
		attrs)

	_, err := l.w.Write([]byte(line))
	return err
}
// makeLogHandler constructs the slog.Handler for the configured output mode.
//
// JSON mode always uses the JSON handler. Text mode uses the colorized
// friendly handler when the log destination is an interactive terminal,
// and the plain text handler otherwise.
func makeLogHandler(opts slog.HandlerOptions) (slog.Handler, error) {
	switch logOutput {
	case flags.OutputJSON:
		return opts.NewJSONHandler(logFile.Writer()), nil
	case flags.OutputText:
		w := logFile.Writer()
		if !cmdio.IsTTY(w) {
			return opts.NewTextHandler(w), nil
		}
		// Interactive terminal: wrap the text handler so Handle can emit
		// colorized single-line output instead.
		return &friendlyHandler{
			Handler: opts.NewTextHandler(w),
			w:       w,
		}, nil
	default:
		return nil, fmt.Errorf("invalid log output mode: %s", logOutput)
	}
}
func initializeLogger(ctx context.Context) (context.Context, error) {
opts := slog.HandlerOptions{}
opts.Level = logLevel.Level()
@ -31,14 +100,9 @@ func initializeLogger(ctx context.Context) (context.Context, error) {
return nil, err
}
var handler slog.Handler
switch logOutput {
case flags.OutputJSON:
handler = opts.NewJSONHandler(logFile.Writer())
case flags.OutputText:
handler = opts.NewTextHandler(logFile.Writer())
default:
return nil, fmt.Errorf("invalid log output: %s", logOutput)
handler, err := makeLogHandler(opts)
if err != nil {
return nil, err
}
slog.SetDefault(slog.New(handler))

View File

@ -1,34 +1,25 @@
package version
import (
"encoding/json"
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/internal/build"
"github.com/databricks/cli/libs/cmdio"
"github.com/spf13/cobra"
)
var detail = false
var versionCmd = &cobra.Command{
Use: "version",
Args: cobra.NoArgs,
RunE: func(cmd *cobra.Command, args []string) error {
info := build.GetInfo()
if detail {
enc := json.NewEncoder(cmd.OutOrStdout())
enc.SetIndent("", " ")
return enc.Encode(info)
}
Annotations: map[string]string{
"template": "Databricks CLI v{{.Version}}\n",
},
_, err := fmt.Fprintf(cmd.OutOrStdout(), "Databricks CLI v%s\n", info.Version)
return err
RunE: func(cmd *cobra.Command, args []string) error {
return cmdio.Render(cmd.Context(), build.GetInfo())
},
}
func init() {
versionCmd.Flags().BoolVar(&detail, "detail", false, "output detailed version information as JSON")
root.RootCmd.AddCommand(versionCmd)
}

View File

@ -67,11 +67,32 @@ func runFilerReadWriteTest(t *testing.T, ctx context.Context, f filer.Filer) {
assert.NoError(t, err)
filerTest{t, f}.assertContents(ctx, "/foo/bar", `hello universe`)
// Stat on a directory should succeed.
// Note: size and modification time behave differently between WSFS and DBFS.
info, err := f.Stat(ctx, "/foo")
require.NoError(t, err)
assert.Equal(t, "foo", info.Name())
assert.True(t, info.Mode().IsDir())
assert.Equal(t, true, info.IsDir())
// Stat on a file should succeed.
// Note: size and modification time behave differently between WSFS and DBFS.
info, err = f.Stat(ctx, "/foo/bar")
require.NoError(t, err)
assert.Equal(t, "bar", info.Name())
assert.True(t, info.Mode().IsRegular())
assert.Equal(t, false, info.IsDir())
// Delete should fail if the file doesn't exist.
err = f.Delete(ctx, "/doesnt_exist")
assert.True(t, errors.As(err, &filer.FileDoesNotExistError{}))
assert.True(t, errors.Is(err, fs.ErrNotExist))
// Stat should fail if the file doesn't exist.
_, err = f.Stat(ctx, "/doesnt_exist")
assert.True(t, errors.As(err, &filer.FileDoesNotExistError{}))
assert.True(t, errors.Is(err, fs.ErrNotExist))
// Delete should succeed for file that does exist.
err = f.Delete(ctx, "/foo/bar")
assert.NoError(t, err)

View File

@ -1,6 +1,7 @@
package internal
import (
"encoding/json"
"fmt"
"testing"
@ -27,3 +28,15 @@ func TestVersionCommand(t *testing.T) {
assert.Equal(t, expectedVersion, stdout.String())
assert.Equal(t, "", stderr.String())
}
// TestVersionCommandWithJSONOutput verifies that `version --output json`
// succeeds and emits a JSON object whose Version field matches the build info.
func TestVersionCommandWithJSONOutput(t *testing.T) {
	stdout, stderr := RequireSuccessfulRun(t, "version", "--output", "json")
	assert.NotEmpty(t, stdout.String())
	assert.Equal(t, "", stderr.String())

	// Parse stdout as JSON and confirm the Version field round-trips.
	var fields map[string]any
	err := json.Unmarshal(stdout.Bytes(), &fields)
	assert.NoError(t, err)
	assert.Equal(t, build.GetInfo().Version, fields["Version"])
}

View File

@ -46,6 +46,16 @@ func IsInteractive(ctx context.Context) bool {
return c.interactive
}
// IsTTY detects if io.Writer is a terminal (including Cygwin/MSYS
// pseudo-terminals on Windows). Writers that are not an *os.File are
// never considered terminals.
func IsTTY(w io.Writer) bool {
	if f, ok := w.(*os.File); ok {
		fd := f.Fd()
		return isatty.IsTerminal(fd) || isatty.IsCygwinTerminal(fd)
	}
	return false
}
// IsTTY detects if stdout is a terminal. It assumes that stderr is terminal as well
func (c *cmdIO) IsTTY() bool {
f, ok := c.out.(*os.File)

View File

@ -22,11 +22,7 @@ type dbfsDirEntry struct {
}
// Type returns the file mode bits for this directory entry by delegating
// to Mode(), which sets fs.ModeDir for directories.
//
// Fix: the rendered block retained the old inline implementation as dead
// code ahead of `return entry.Mode()`; the unreachable statements are removed.
func (entry dbfsDirEntry) Type() fs.FileMode {
	return entry.Mode()
}
func (entry dbfsDirEntry) Info() (fs.FileInfo, error) {
@ -47,7 +43,11 @@ func (info dbfsFileInfo) Size() int64 {
}
// Mode returns fs.ModePerm for files, with fs.ModeDir additionally set
// when the underlying DBFS status marks the entry as a directory.
//
// Fix: the rendered block had a stray `return fs.ModePerm` before the real
// body, making it unreachable and leaving `mode` declared-and-unused (a
// compile error); the stray return is removed.
func (info dbfsFileInfo) Mode() fs.FileMode {
	mode := fs.ModePerm
	if info.fi.IsDir {
		mode |= fs.ModeDir
	}
	return mode
}
func (info dbfsFileInfo) ModTime() time.Time {
@ -244,3 +244,29 @@ func (w *DbfsClient) Mkdir(ctx context.Context, name string) error {
return w.workspaceClient.Dbfs.MkdirsByPath(ctx, dirPath)
}
// Stat returns metadata for the file or directory at name, resolved against
// the client's root. A 404 with error code RESOURCE_DOES_NOT_EXIST is
// translated into FileDoesNotExistError; other errors pass through unchanged.
func (w *DbfsClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
	absPath, err := w.root.Join(name)
	if err != nil {
		return nil, err
	}

	info, err := w.workspaceClient.Dbfs.GetStatusByPath(ctx, absPath)
	if err == nil {
		return dbfsFileInfo{*info}, nil
	}

	// Map the API's "path not found" response onto the filer's error type.
	var aerr *apierr.APIError
	if errors.As(err, &aerr) &&
		aerr.StatusCode == http.StatusNotFound &&
		aerr.ErrorCode == "RESOURCE_DOES_NOT_EXIST" {
		return nil, FileDoesNotExistError{absPath}
	}

	return nil, err
}

View File

@ -83,4 +83,7 @@ type Filer interface {
// Creates directory at `path`, creating any intermediate directories as required.
Mkdir(ctx context.Context, path string) error
// Stat returns information about the file at `path`.
Stat(ctx context.Context, name string) (fs.FileInfo, error)
}

View File

@ -262,3 +262,26 @@ func (w *WorkspaceFilesClient) Mkdir(ctx context.Context, name string) error {
Path: dirPath,
})
}
// Stat returns metadata for the workspace object at name, resolved against
// the client's root. A 404 from the workspace API is translated into
// FileDoesNotExistError; any other error is returned as-is.
//
// Fix: an apierr.APIError with a non-404 status previously fell through the
// error block and dereferenced a nil info (`wsfsFileInfo{*info}`). Add the
// missing `return nil, err`, matching DbfsClient.Stat.
func (w *WorkspaceFilesClient) Stat(ctx context.Context, name string) (fs.FileInfo, error) {
	absPath, err := w.root.Join(name)
	if err != nil {
		return nil, err
	}

	info, err := w.workspaceClient.Workspace.GetStatusByPath(ctx, absPath)
	if err != nil {
		// If we got an API error we deal with it below.
		var aerr *apierr.APIError
		if !errors.As(err, &aerr) {
			return nil, err
		}

		// This API returns a 404 if the specified path does not exist.
		if aerr.StatusCode == http.StatusNotFound {
			return nil, FileDoesNotExistError{absPath}
		}

		// Propagate any other API error instead of dereferencing nil info below.
		return nil, err
	}

	return wsfsFileInfo{*info}, nil
}