mirror of https://github.com/databricks/cli.git
Enable perfsprint linter and apply autofix (#2071)
https://github.com/catenacyber/perfsprint
This commit is contained in:
parent 3629c9e406
commit e2cd8c2f34
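For readers unfamiliar with perfsprint: it flags fmt calls that can be replaced with cheaper equivalents. Below is a minimal sketch of the three rewrite patterns this commit applies repeatedly; the function and variable names are illustrative, not taken from the diff.

package perfsprintdemo

import (
    "errors"
    "strconv"
)

// Illustrative before/after pairs for the perfsprint rewrites applied in
// this commit. Each "before" form is shown in a comment above its
// replacement; the names here are hypothetical.
func examples(name string, id int64) (string, string, error) {
    // before: label := fmt.Sprintf("prefix: %s", name)
    // after: plain concatenation avoids the fmt formatting machinery.
    label := "prefix: " + name

    // before: idStr := fmt.Sprint(id)
    // after: strconv.FormatInt is the direct integer-to-string conversion.
    idStr := strconv.FormatInt(id, 10)

    // before: err := fmt.Errorf("static message")
    // after: with no format verbs, errors.New is equivalent and cheaper.
    err := errors.New("static message")

    return label, idStr, err
}

Calls that do real formatting (for example fmt.Errorf with %w, or fmt.Sprintf with multiple verbs) are left untouched, which is why fmt remains imported in several of the files below.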
@@ -14,6 +14,7 @@ linters:
     - testifylint
     - intrange
     - mirror
+    - perfsprint
 linters-settings:
   govet:
     enable-all: true
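With the linter enabled in .golangci.yaml as above, a run such as the following should reproduce the autofix locally (assuming golangci-lint is installed; --fix applies the suggested rewrites in place):

golangci-lint run --fix ./...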
@@ -2,7 +2,6 @@ package artifacts

 import (
     "context"
-    "fmt"
     "path/filepath"
     "testing"

@@ -88,16 +87,16 @@ func TestExpandGlobs_InvalidPattern(t *testing.T) {
     ))

     assert.Len(t, diags, 4)
-    assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[0].Summary)
+    assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[0].Summary)
     assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[0].Locations[0].File)
     assert.Equal(t, "artifacts.test.files[0].source", diags[0].Paths[0].String())
-    assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("a[.txt")), diags[1].Summary)
+    assert.Equal(t, filepath.Clean("a[.txt")+": syntax error in pattern", diags[1].Summary)
     assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[1].Locations[0].File)
     assert.Equal(t, "artifacts.test.files[1].source", diags[1].Paths[0].String())
-    assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("../a[.txt")), diags[2].Summary)
+    assert.Equal(t, filepath.Clean("../a[.txt")+": syntax error in pattern", diags[2].Summary)
     assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[2].Locations[0].File)
     assert.Equal(t, "artifacts.test.files[2].source", diags[2].Paths[0].String())
-    assert.Equal(t, fmt.Sprintf("%s: syntax error in pattern", filepath.Clean("subdir/a[.txt")), diags[3].Summary)
+    assert.Equal(t, filepath.Clean("subdir/a[.txt")+": syntax error in pattern", diags[3].Summary)
     assert.Equal(t, filepath.Join(tmpDir, "databricks.yml"), diags[3].Locations[0].File)
     assert.Equal(t, "artifacts.test.files[3].source", diags[3].Paths[0].String())
 }

@@ -32,7 +32,7 @@ func (m *infer) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
     //)

     py := python.GetExecutable()
-    artifact.BuildCommand = fmt.Sprintf(`%s setup.py bdist_wheel`, py)
+    artifact.BuildCommand = py + " setup.py bdist_wheel"

     return nil
 }
@@ -8,6 +8,7 @@ package bundle

 import (
     "context"
+    "errors"
     "fmt"
     "os"
     "path/filepath"

@@ -234,7 +235,7 @@ func (b *Bundle) GetSyncIncludePatterns(ctx context.Context) ([]string, error) {
 // we call into from this bundle context.
 func (b *Bundle) AuthEnv() (map[string]string, error) {
     if b.client == nil {
-        return nil, fmt.Errorf("workspace client not initialized yet")
+        return nil, errors.New("workspace client not initialized yet")
     }

     cfg := b.client.Config
@@ -2,7 +2,7 @@ package config

 import (
     "context"
-    "fmt"
+    "errors"

     "github.com/databricks/cli/libs/exec"
 )

@@ -37,7 +37,7 @@ type Artifact struct {

 func (a *Artifact) Build(ctx context.Context) ([]byte, error) {
     if a.BuildCommand == "" {
-        return nil, fmt.Errorf("no build property defined")
+        return nil, errors.New("no build property defined")
     }

     var e *exec.Executor
@@ -2,7 +2,6 @@ package mutator

 import (
     "context"
-    "fmt"
     "path"
     "strings"

@@ -33,7 +32,7 @@ func (m *expandWorkspaceRoot) Apply(ctx context.Context, b *bundle.Bundle) diag.
     }

     if strings.HasPrefix(root, "~/") {
-        home := fmt.Sprintf("/Workspace/Users/%s", currentUser.UserName)
+        home := "/Workspace/Users/" + currentUser.UserName
         b.Config.Workspace.RootPath = path.Join(home, root[2:])
     }
@@ -55,7 +55,7 @@ func (m *prependWorkspacePrefix) Apply(ctx context.Context, b *bundle.Bundle) di
         }
     }

-    return dyn.NewValue(fmt.Sprintf("/Workspace%s", path), v.Locations()), nil
+    return dyn.NewValue("/Workspace"+path, v.Locations()), nil
 })
 if err != nil {
     return dyn.InvalidValue, err
@@ -2,6 +2,7 @@ package python

 import (
     "context"
+    "errors"
     "fmt"
     "os"
     "os/exec"

@@ -319,15 +320,15 @@ func TestCreateOverrideVisitor(t *testing.T) {
     updatePath: dyn.MustPathFromString("resources.jobs.job0.name"),
     deletePath: dyn.MustPathFromString("resources.jobs.job0.name"),
     insertPath: dyn.MustPathFromString("resources.jobs.job0.name"),
-    deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (delete)"),
-    insertError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (insert)"),
-    updateError: fmt.Errorf("unexpected change at \"resources.jobs.job0.name\" (update)"),
+    deleteError: errors.New("unexpected change at \"resources.jobs.job0.name\" (delete)"),
+    insertError: errors.New("unexpected change at \"resources.jobs.job0.name\" (insert)"),
+    updateError: errors.New("unexpected change at \"resources.jobs.job0.name\" (update)"),
 },
 {
     name: "load: can't delete an existing job",
     phase: PythonMutatorPhaseLoad,
     deletePath: dyn.MustPathFromString("resources.jobs.job0"),
-    deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
+    deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"),
 },
 {
     name: "load: can insert 'resources'",

@@ -353,9 +354,9 @@ func TestCreateOverrideVisitor(t *testing.T) {
     deletePath: dyn.MustPathFromString("include[0]"),
     insertPath: dyn.MustPathFromString("include[0]"),
     updatePath: dyn.MustPathFromString("include[0]"),
-    deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
-    insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
-    updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
+    deleteError: errors.New("unexpected change at \"include[0]\" (delete)"),
+    insertError: errors.New("unexpected change at \"include[0]\" (insert)"),
+    updateError: errors.New("unexpected change at \"include[0]\" (update)"),
 },
 {
     name: "init: can change an existing job",

@@ -371,7 +372,7 @@ func TestCreateOverrideVisitor(t *testing.T) {
     name: "init: can't delete an existing job",
     phase: PythonMutatorPhaseInit,
     deletePath: dyn.MustPathFromString("resources.jobs.job0"),
-    deleteError: fmt.Errorf("unexpected change at \"resources.jobs.job0\" (delete)"),
+    deleteError: errors.New("unexpected change at \"resources.jobs.job0\" (delete)"),
 },
 {
     name: "init: can insert 'resources'",

@@ -397,9 +398,9 @@ func TestCreateOverrideVisitor(t *testing.T) {
     deletePath: dyn.MustPathFromString("include[0]"),
     insertPath: dyn.MustPathFromString("include[0]"),
     updatePath: dyn.MustPathFromString("include[0]"),
-    deleteError: fmt.Errorf("unexpected change at \"include[0]\" (delete)"),
-    insertError: fmt.Errorf("unexpected change at \"include[0]\" (insert)"),
-    updateError: fmt.Errorf("unexpected change at \"include[0]\" (update)"),
+    deleteError: errors.New("unexpected change at \"include[0]\" (delete)"),
+    insertError: errors.New("unexpected change at \"include[0]\" (insert)"),
+    updateError: errors.New("unexpected change at \"include[0]\" (update)"),
 },
 }
@@ -2,7 +2,7 @@ package mutator

 import (
     "context"
-    "fmt"
+    "errors"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/bundle/config/variable"

@@ -68,7 +68,7 @@ func lookupForComplexVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) {
     }

     if vv.Type == variable.VariableTypeComplex {
-        return dyn.InvalidValue, fmt.Errorf("complex variables cannot contain references to another complex variables")
+        return dyn.InvalidValue, errors.New("complex variables cannot contain references to another complex variables")
     }

     return lookup(v, path)

@@ -100,7 +100,7 @@ func lookupForVariables(v dyn.Value, path dyn.Path) (dyn.Value, error) {
     }

     if vv.Lookup != nil && vv.Lookup.String() != "" {
-        return dyn.InvalidValue, fmt.Errorf("lookup variables cannot contain references to another lookup variables")
+        return dyn.InvalidValue, errors.New("lookup variables cannot contain references to another lookup variables")
     }

     return lookup(v, path)
@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"

     "github.com/databricks/cli/libs/log"

@@ -45,7 +44,7 @@ func (s *Cluster) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("compute/clusters/%s", s.ID)
+    baseURL.Path = "compute/clusters/" + s.ID
     s.URL = baseURL.String()
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"
     "strconv"

@@ -52,7 +51,7 @@ func (j *Job) InitializeURL(baseURL url.URL) {
     if j.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("jobs/%s", j.ID)
+    baseURL.Path = "jobs/" + j.ID
     j.URL = baseURL.String()
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"

     "github.com/databricks/cli/libs/log"

@@ -47,7 +46,7 @@ func (s *MlflowExperiment) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("ml/experiments/%s", s.ID)
+    baseURL.Path = "ml/experiments/" + s.ID
     s.URL = baseURL.String()
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"

     "github.com/databricks/cli/libs/log"

@@ -47,7 +46,7 @@ func (s *MlflowModel) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("ml/models/%s", s.ID)
+    baseURL.Path = "ml/models/" + s.ID
     s.URL = baseURL.String()
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"

     "github.com/databricks/cli/libs/log"

@@ -55,7 +54,7 @@ func (s *ModelServingEndpoint) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("ml/endpoints/%s", s.ID)
+    baseURL.Path = "ml/endpoints/" + s.ID
     s.URL = baseURL.String()
 }
@@ -25,5 +25,5 @@ func (p Permission) String() string {
         return fmt.Sprintf("level: %s, group_name: %s", p.Level, p.GroupName)
     }

-    return fmt.Sprintf("level: %s", p.Level)
+    return "level: " + p.Level
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"

     "github.com/databricks/cli/libs/log"

@@ -47,7 +46,7 @@ func (p *Pipeline) InitializeURL(baseURL url.URL) {
     if p.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("pipelines/%s", p.ID)
+    baseURL.Path = "pipelines/" + p.ID
     p.URL = baseURL.String()
 }

@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"
     "strings"

@@ -51,7 +50,7 @@ func (s *QualityMonitor) InitializeURL(baseURL url.URL) {
     if s.TableName == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.TableName, ".", "/"))
+    baseURL.Path = "explore/data/" + strings.ReplaceAll(s.TableName, ".", "/")
     s.URL = baseURL.String()
 }
@@ -2,7 +2,6 @@ package resources

 import (
     "context"
-    "fmt"
     "net/url"
     "strings"

@@ -57,7 +56,7 @@ func (s *RegisteredModel) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("explore/data/models/%s", strings.ReplaceAll(s.ID, ".", "/"))
+    baseURL.Path = "explore/data/models/" + strings.ReplaceAll(s.ID, ".", "/")
     s.URL = baseURL.String()
 }

@@ -2,7 +2,7 @@ package resources

 import (
     "context"
-    "fmt"
+    "errors"
     "net/url"
     "strings"

@@ -26,7 +26,7 @@ type Schema struct {
 }

 func (s *Schema) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
-    return false, fmt.Errorf("schema.Exists() is not supported")
+    return false, errors.New("schema.Exists() is not supported")
 }

 func (s *Schema) TerraformResourceName() string {

@@ -37,7 +37,7 @@ func (s *Schema) InitializeURL(baseURL url.URL) {
     if s.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("explore/data/%s", strings.ReplaceAll(s.ID, ".", "/"))
+    baseURL.Path = "explore/data/" + strings.ReplaceAll(s.ID, ".", "/")
     s.URL = baseURL.String()
 }
@@ -2,7 +2,7 @@ package resources

 import (
     "context"
-    "fmt"
+    "errors"
     "net/url"
     "strings"

@@ -34,7 +34,7 @@ func (v Volume) MarshalJSON() ([]byte, error) {
 }

 func (v *Volume) Exists(ctx context.Context, w *databricks.WorkspaceClient, id string) (bool, error) {
-    return false, fmt.Errorf("volume.Exists() is not supported")
+    return false, errors.New("volume.Exists() is not supported")
 }

 func (v *Volume) TerraformResourceName() string {

@@ -45,7 +45,7 @@ func (v *Volume) InitializeURL(baseURL url.URL) {
     if v.ID == "" {
         return
     }
-    baseURL.Path = fmt.Sprintf("explore/data/volumes/%s", strings.ReplaceAll(v.ID, ".", "/"))
+    baseURL.Path = "explore/data/volumes/" + strings.ReplaceAll(v.ID, ".", "/")
     v.URL = baseURL.String()
 }
@@ -4,6 +4,7 @@ import (
     "context"
     "fmt"
     "path"
+    "strconv"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/bundle/libraries"

@@ -60,7 +61,7 @@ func checkFolderPermission(ctx context.Context, b bundle.ReadOnlyBundle, folderP
     }

     objPermissions, err := w.GetPermissions(ctx, workspace.GetWorkspaceObjectPermissionsRequest{
-        WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
+        WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10),
         WorkspaceObjectType: "directories",
     })
     if err != nil {

@@ -2,7 +2,6 @@ package validate

 import (
     "context"
-    "fmt"
     "sort"

     "github.com/databricks/cli/bundle"

@@ -102,7 +101,7 @@ func (m *uniqueResourceKeys) Apply(ctx context.Context, b *bundle.Bundle) diag.D
     // If there are multiple resources with the same key, report an error.
     diags = append(diags, diag.Diagnostic{
         Severity: diag.Error,
-        Summary: fmt.Sprintf("multiple resources have been defined with the same key: %s", k),
+        Summary: "multiple resources have been defined with the same key: " + k,
         Locations: v.locations,
         Paths: v.paths,
     })
@@ -68,7 +68,7 @@ func findVolumeInBundle(r config.Root, catalogName, schemaName, volumeName strin
     if v.SchemaName != schemaName && !isSchemaDefinedInBundle {
         continue
     }
-    pathString := fmt.Sprintf("resources.volumes.%s", k)
+    pathString := "resources.volumes." + k
     return dyn.MustPathFromString(pathString), r.GetLocations(pathString), true
 }
 return nil, nil, false

@@ -2,7 +2,6 @@ package validate

 import (
     "context"
-    "fmt"
     "testing"

     "github.com/databricks/cli/bundle"

@@ -152,7 +151,7 @@ func TestExtractVolumeFromPath(t *testing.T) {

     for _, p := range invalidVolumePaths() {
         _, _, _, err := extractVolumeFromPath(p)
-        assert.EqualError(t, err, fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p))
+        assert.EqualError(t, err, "expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got "+p)
     }
 }

@@ -171,7 +170,7 @@ func TestValidateArtifactPathWithInvalidPaths(t *testing.T) {
     diags := bundle.ApplyReadOnly(context.Background(), bundle.ReadOnly(b), ValidateArtifactPath())
     require.Equal(t, diag.Diagnostics{{
         Severity: diag.Error,
-        Summary: fmt.Sprintf("expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got %s", p),
+        Summary: "expected UC volume path to be in the format /Volumes/<catalog>/<schema>/<volume>/..., got " + p,
         Locations: []dyn.Location{{File: "config.yml", Line: 1, Column: 2}},
         Paths: []dyn.Path{dyn.MustPathFromString("workspace.artifact_path")},
     }}, diags)
@@ -2,7 +2,7 @@ package variable

 import (
     "context"
-    "fmt"
+    "errors"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -83,11 +83,11 @@ func (l *Lookup) constructResolver() (resolver, error) {

     switch len(resolvers) {
     case 0:
-        return nil, fmt.Errorf("no valid lookup fields provided")
+        return nil, errors.New("no valid lookup fields provided")
     case 1:
         return resolvers[0], nil
     default:
-        return nil, fmt.Errorf("exactly one lookup field must be provided")
+        return nil, errors.New("exactly one lookup field must be provided")
     }
 }
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveAlert) Resolve(ctx context.Context, w *databricks.WorkspaceClient
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.Id), nil
+    return entity.Id, nil
 }

 func (l resolveAlert) String() string {
-    return fmt.Sprintf("alert: %s", l.name)
+    return "alert: " + l.name
 }

@@ -42,5 +42,5 @@ func (l resolveCluster) Resolve(ctx context.Context, w *databricks.WorkspaceClie
 }

 func (l resolveCluster) String() string {
-    return fmt.Sprintf("cluster: %s", l.name)
+    return "cluster: " + l.name
 }
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveClusterPolicy) Resolve(ctx context.Context, w *databricks.Workspa
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.PolicyId), nil
+    return entity.PolicyId, nil
 }

 func (l resolveClusterPolicy) String() string {
-    return fmt.Sprintf("cluster-policy: %s", l.name)
+    return "cluster-policy: " + l.name
 }

@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveDashboard) Resolve(ctx context.Context, w *databricks.WorkspaceCl
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.Id), nil
+    return entity.Id, nil
 }

 func (l resolveDashboard) String() string {
-    return fmt.Sprintf("dashboard: %s", l.name)
+    return "dashboard: " + l.name
 }
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveInstancePool) Resolve(ctx context.Context, w *databricks.Workspac
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.InstancePoolId), nil
+    return entity.InstancePoolId, nil
 }

 func (l resolveInstancePool) String() string {
-    return fmt.Sprintf("instance-pool: %s", l.name)
+    return "instance-pool: " + l.name
 }

@@ -2,7 +2,7 @@ package variable

 import (
     "context"
-    "fmt"
+    "strconv"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +16,9 @@ func (l resolveJob) Resolve(ctx context.Context, w *databricks.WorkspaceClient)
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.JobId), nil
+    return strconv.FormatInt(entity.JobId, 10), nil
 }

 func (l resolveJob) String() string {
-    return fmt.Sprintf("job: %s", l.name)
+    return "job: " + l.name
 }
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveMetastore) Resolve(ctx context.Context, w *databricks.WorkspaceCl
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.MetastoreId), nil
+    return entity.MetastoreId, nil
 }

 func (l resolveMetastore) String() string {
-    return fmt.Sprintf("metastore: %s", l.name)
+    return "metastore: " + l.name
 }

@@ -42,5 +42,5 @@ func (l resolveNotificationDestination) Resolve(ctx context.Context, w *databric
 }

 func (l resolveNotificationDestination) String() string {
-    return fmt.Sprintf("notification-destination: %s", l.name)
+    return "notification-destination: " + l.name
 }
@@ -2,7 +2,7 @@ package variable

 import (
     "context"
-    "fmt"
+    "errors"
     "testing"

     "github.com/databricks/databricks-sdk-go/experimental/mocks"

@@ -35,7 +35,7 @@ func TestResolveNotificationDestination_ResolveError(t *testing.T) {
     api := m.GetMockNotificationDestinationsAPI()
     api.EXPECT().
         ListAll(mock.Anything, mock.Anything).
-        Return(nil, fmt.Errorf("bad"))
+        Return(nil, errors.New("bad"))

     ctx := context.Background()
     l := resolveNotificationDestination{name: "destination"}
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolvePipeline) Resolve(ctx context.Context, w *databricks.WorkspaceCli
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.PipelineId), nil
+    return entity.PipelineId, nil
 }

 func (l resolvePipeline) String() string {
-    return fmt.Sprintf("pipeline: %s", l.name)
+    return "pipeline: " + l.name
 }

@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveQuery) Resolve(ctx context.Context, w *databricks.WorkspaceClient
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.Id), nil
+    return entity.Id, nil
 }

 func (l resolveQuery) String() string {
-    return fmt.Sprintf("query: %s", l.name)
+    return "query: " + l.name
 }
@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveServicePrincipal) Resolve(ctx context.Context, w *databricks.Work
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.ApplicationId), nil
+    return entity.ApplicationId, nil
 }

 func (l resolveServicePrincipal) String() string {
-    return fmt.Sprintf("service-principal: %s", l.name)
+    return "service-principal: " + l.name
 }

@@ -2,7 +2,6 @@ package variable

 import (
     "context"
-    "fmt"

     "github.com/databricks/databricks-sdk-go"
 )

@@ -16,9 +15,9 @@ func (l resolveWarehouse) Resolve(ctx context.Context, w *databricks.WorkspaceCl
     if err != nil {
         return "", err
     }
-    return fmt.Sprint(entity.Id), nil
+    return entity.Id, nil
 }

 func (l resolveWarehouse) String() string {
-    return fmt.Sprintf("warehouse: %s", l.name)
+    return "warehouse: " + l.name
 }
@@ -1,6 +1,7 @@
 package variable

 import (
+    "errors"
     "fmt"
     "reflect"
 )

@@ -68,7 +69,7 @@ func (v *Variable) Set(val VariableValue) error {
     switch rv.Kind() {
     case reflect.Struct, reflect.Array, reflect.Slice, reflect.Map:
         if v.Type != VariableTypeComplex {
-            return fmt.Errorf("variable type is not complex")
+            return errors.New("variable type is not complex")
         }
     }
@@ -3,6 +3,7 @@ package deploy
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "io"
     "io/fs"

@@ -95,7 +96,7 @@ func (e *entry) Type() fs.FileMode {

 func (e *entry) Info() (fs.FileInfo, error) {
     if e.info == nil {
-        return nil, fmt.Errorf("no info available")
+        return nil, errors.New("no info available")
     }
     return e.info, nil
 }
@@ -72,7 +72,7 @@ func (l *checkDashboardsModifiedRemotely) Apply(ctx context.Context, b *bundle.B
         continue
     }

-    path := dyn.MustPathFromString(fmt.Sprintf("resources.dashboards.%s", dashboard.Name))
+    path := dyn.MustPathFromString("resources.dashboards." + dashboard.Name)
     loc := b.Config.GetLocation(path.String())
     actual, err := b.WorkspaceClient().Lakeview.GetByDashboardId(ctx, dashboard.ID)
     if err != nil {
@@ -2,7 +2,7 @@ package terraform

 import (
     "context"
-    "fmt"
+    "errors"
     "path/filepath"
     "testing"

@@ -122,7 +122,7 @@ func TestCheckDashboardsModifiedRemotely_ExistingStateFailureToGet(t *testing.T)
     dashboardsAPI := m.GetMockLakeviewAPI()
     dashboardsAPI.EXPECT().
         GetByDashboardId(mock.Anything, "id1").
-        Return(nil, fmt.Errorf("failure")).
+        Return(nil, errors.New("failure")).
         Once()
     b.SetWorkpaceClient(m.WorkspaceClient)
@@ -230,7 +230,7 @@ func setUserAgentExtraEnvVar(environ map[string]string, b *bundle.Bundle) error
     // Add "cli" to the user agent in set by the Databricks Terraform provider.
     // This will allow us to attribute downstream requests made by the Databricks
     // Terraform provider to the CLI.
-    products := []string{fmt.Sprintf("cli/%s", build.GetInfo().Version)}
+    products := []string{"cli/" + build.GetInfo().Version}
     if experimental := b.Config.Experimental; experimental != nil {
         if experimental.PyDABs.Enabled {
             products = append(products, "databricks-pydabs/0.0.0")
@@ -2,6 +2,7 @@ package terraform

 import (
     "context"
+    "errors"
     "fmt"
     "slices"

@@ -58,7 +59,7 @@ func (l *load) validateState(state *resourcesState) error {
     }

     if len(state.Resources) == 0 && slices.Contains(l.modes, ErrorOnEmptyState) {
-        return fmt.Errorf("no deployment state. Did you forget to run 'databricks bundle deploy'?")
+        return errors.New("no deployment state. Did you forget to run 'databricks bundle deploy'?")
     }

     return nil
@@ -1,7 +1,7 @@
 package libraries

 import (
-    "fmt"
+    "errors"

     "github.com/databricks/databricks-sdk-go/service/compute"
 )

@@ -20,5 +20,5 @@ func libraryPath(library *compute.Library) (string, error) {
         return library.Requirements, nil
     }

-    return "", fmt.Errorf("not supported library type")
+    return "", errors.New("not supported library type")
 }
@@ -3,6 +3,7 @@ package permissions
 import (
     "context"
     "fmt"
+    "strconv"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/bundle/libraries"

@@ -78,7 +79,7 @@ func setPermissions(ctx context.Context, w workspace.WorkspaceInterface, path st
     }

     _, err = w.SetPermissions(ctx, workspace.WorkspaceObjectPermissionsRequest{
-        WorkspaceObjectId: fmt.Sprint(obj.ObjectId),
+        WorkspaceObjectId: strconv.FormatInt(obj.ObjectId, 10),
         WorkspaceObjectType: "directories",
         AccessControlList: permissions,
     })
@@ -2,7 +2,7 @@ package phases

 import (
     "context"
-    "fmt"
+    "errors"

     "github.com/databricks/cli/bundle"
     "github.com/databricks/cli/bundle/artifacts"

@@ -54,7 +54,7 @@ func filterDeleteOrRecreateActions(changes []*tfjson.ResourceChange, resourceTyp
 func approvalForDeploy(ctx context.Context, b *bundle.Bundle) (bool, error) {
     tf := b.Terraform
     if tf == nil {
-        return false, fmt.Errorf("terraform not initialized")
+        return false, errors.New("terraform not initialized")
     }

     // read plan file

@@ -111,7 +111,7 @@ is removed from the catalog, but the underlying files are not deleted:`
     }

     if !cmdio.IsPromptSupported(ctx) {
-        return false, fmt.Errorf("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
+        return false, errors.New("the deployment requires destructive actions, but current console does not support prompting. Please specify --auto-approve if you would like to skip prompts and proceed")
     }

     cmdio.LogString(ctx, "")
@@ -3,7 +3,6 @@ package phases
 import (
     "context"
     "errors"
-    "fmt"
     "net/http"

     "github.com/databricks/cli/bundle"

@@ -34,7 +33,7 @@ func assertRootPathExists(ctx context.Context, b *bundle.Bundle) (bool, error) {
 func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) {
     tf := b.Terraform
     if tf == nil {
-        return false, fmt.Errorf("terraform not initialized")
+        return false, errors.New("terraform not initialized")
     }

     // read plan file

@@ -63,7 +62,7 @@ func approvalForDestroy(ctx context.Context, b *bundle.Bundle) (bool, error) {

     }

-    cmdio.LogString(ctx, fmt.Sprintf("All files and directories at the following location will be deleted: %s", b.Config.Workspace.RootPath))
+    cmdio.LogString(ctx, "All files and directories at the following location will be deleted: "+b.Config.Workspace.RootPath)
     cmdio.LogString(ctx, "")

     if b.AutoApprove {
@@ -2,6 +2,7 @@ package bundle

 import (
     "context"
+    "errors"
     "fmt"
     "os"

@@ -21,7 +22,7 @@ func getRootEnv(ctx context.Context) (string, error) {
     }
     stat, err := os.Stat(path)
     if err == nil && !stat.IsDir() {
-        err = fmt.Errorf("not a directory")
+        err = errors.New("not a directory")
     }
     if err != nil {
         return "", fmt.Errorf(`invalid bundle root %s="%s": %w`, env.RootVariable, path, err)
@@ -3,6 +3,7 @@ package run
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"
     "strconv"
     "time"

@@ -181,13 +182,13 @@ func (r *jobRunner) Run(ctx context.Context, opts *Options) (output.RunOutput, e
     // callback to log progress events. Called on every poll request
     progressLogger, ok := cmdio.FromContext(ctx)
     if !ok {
-        return nil, fmt.Errorf("no progress logger found")
+        return nil, errors.New("no progress logger found")
     }
     logProgress := logProgressCallback(ctx, progressLogger)

     waiter, err := w.Jobs.RunNow(ctx, *req)
     if err != nil {
-        return nil, fmt.Errorf("cannot start job")
+        return nil, errors.New("cannot start job")
     }

     if opts.NoWait {

@@ -266,7 +267,7 @@ func (r *jobRunner) convertPythonParams(opts *Options) error {

     if len(opts.Job.pythonParams) > 0 {
         if _, ok := opts.Job.notebookParams["__python_params"]; ok {
-            return fmt.Errorf("can't use __python_params as notebook param, the name is reserved for internal use")
+            return errors.New("can't use __python_params as notebook param, the name is reserved for internal use")
         }
         p, err := json.Marshal(opts.Job.pythonParams)
         if err != nil {
@@ -1,7 +1,7 @@
 package run

 import (
-    "fmt"
+    "errors"
     "strconv"

     "github.com/databricks/cli/bundle/config/resources"

@@ -60,16 +60,16 @@ func (o *JobOptions) hasJobParametersConfigured() bool {
 // Validate returns if the combination of options is valid.
 func (o *JobOptions) Validate(job *resources.Job) error {
     if job == nil {
-        return fmt.Errorf("job not defined")
+        return errors.New("job not defined")
     }

     // Ensure mutual exclusion on job parameters and task parameters.
     hasJobParams := len(job.Parameters) > 0
     if hasJobParams && o.hasTaskParametersConfigured() {
-        return fmt.Errorf("the job to run defines job parameters; specifying task parameters is not allowed")
+        return errors.New("the job to run defines job parameters; specifying task parameters is not allowed")
     }
     if !hasJobParams && o.hasJobParametersConfigured() {
-        return fmt.Errorf("the job to run does not define job parameters; specifying job parameters is not allowed")
+        return errors.New("the job to run does not define job parameters; specifying job parameters is not allowed")
     }

     return nil

@@ -80,7 +80,7 @@ func (o *JobOptions) validatePipelineParams() (*jobs.PipelineParams, error) {
         return nil, nil
     }

-    defaultErr := fmt.Errorf("job run argument --pipeline-params only supports `full_refresh=<bool>`")
+    defaultErr := errors.New("job run argument --pipeline-params only supports `full_refresh=<bool>`")
     v, ok := o.pipelineParams["full_refresh"]
     if !ok {
         return nil, defaultErr
@@ -47,7 +47,7 @@ func (out *JobOutput) String() (string, error) {
     }
     result.WriteString("=======\n")
     result.WriteString(fmt.Sprintf("Task %s:\n", v.TaskKey))
-    result.WriteString(fmt.Sprintf("%s\n", taskString))
+    result.WriteString(taskString + "\n")
 }
 return result.String(), nil
 }
@@ -2,7 +2,6 @@ package output

 import (
     "encoding/json"
-    "fmt"

     "github.com/databricks/databricks-sdk-go/service/jobs"
 )

@@ -27,7 +26,7 @@ func structToString(val any) (string, error) {

 func (out *NotebookOutput) String() (string, error) {
     if out.Truncated {
-        return fmt.Sprintf("%s\n[truncated...]\n", out.Result), nil
+        return out.Result + "\n[truncated...]\n", nil
     }
     return out.Result, nil
 }

@@ -42,7 +41,7 @@ func (out *DbtOutput) String() (string, error) {
     // JSON is used because it's a convenient representation.
     // If user needs machine parsable output, they can use the --output json
     // flag
-    return fmt.Sprintf("Dbt Task Output:\n%s", outputString), nil
+    return "Dbt Task Output:\n" + outputString, nil
 }

 func (out *SqlOutput) String() (string, error) {

@@ -55,12 +54,12 @@ func (out *SqlOutput) String() (string, error) {
     // JSON is used because it's a convenient representation.
     // If user needs machine parsable output, they can use the --output json
     // flag
-    return fmt.Sprintf("SQL Task Output:\n%s", outputString), nil
+    return "SQL Task Output:\n" + outputString, nil
 }

 func (out *LogsOutput) String() (string, error) {
     if out.LogsTruncated {
-        return fmt.Sprintf("%s\n[truncated...]\n", out.Logs), nil
+        return out.Logs + "\n[truncated...]\n", nil
     }
     return out.Logs, nil
 }
@@ -2,6 +2,7 @@ package run

 import (
     "context"
+    "errors"
     "fmt"
     "time"

@@ -33,7 +34,7 @@ func (r *pipelineRunner) logEvent(ctx context.Context, event pipelines.PipelineE
     if event.Error != nil && len(event.Error.Exceptions) > 0 {
         logString += "trace for most recent exception: \n"
         for i := range len(event.Error.Exceptions) {
-            logString += fmt.Sprintf("%s\n", event.Error.Exceptions[i].Message)
+            logString += event.Error.Exceptions[i].Message + "\n"
         }
     }
     if logString != "" {

@@ -107,7 +108,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
     updateTracker := progress.NewUpdateTracker(pipelineID, updateID, w)
     progressLogger, ok := cmdio.FromContext(ctx)
     if !ok {
-        return nil, fmt.Errorf("no progress logger found")
+        return nil, errors.New("no progress logger found")
     }

     // Log the pipeline update URL as soon as it is available.

@@ -144,7 +145,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp

     if state == pipelines.UpdateInfoStateCanceled {
         log.Infof(ctx, "Update was cancelled!")
-        return nil, fmt.Errorf("update cancelled")
+        return nil, errors.New("update cancelled")
     }
     if state == pipelines.UpdateInfoStateFailed {
         log.Infof(ctx, "Update has failed!")

@@ -152,7 +153,7 @@ func (r *pipelineRunner) Run(ctx context.Context, opts *Options) (output.RunOutp
     if err != nil {
         return nil, err
     }
-    return nil, fmt.Errorf("update failed")
+    return nil, errors.New("update failed")
 }
 if state == pipelines.UpdateInfoStateCompleted {
     log.Infof(ctx, "Update has completed successfully!")
@@ -33,7 +33,7 @@ func (event *ProgressEvent) String() string {
     // construct error string if level=`Error`
     if event.Level == pipelines.EventLevelError && event.Error != nil {
         for _, exception := range event.Error.Exceptions {
-            result.WriteString(fmt.Sprintf("\n%s", exception.Message))
+            result.WriteString("\n" + exception.Message)
         }
     }
     return result.String()
@@ -2,7 +2,6 @@ package config_tests

 import (
     "context"
-    "fmt"
     "testing"

     "github.com/databricks/cli/bundle"

@@ -219,7 +218,7 @@ func TestRunAsErrorNeitherUserOrSpSpecified(t *testing.T) {

     for _, tc := range tcases {
         t.Run(tc.name, func(t *testing.T) {
-            bundlePath := fmt.Sprintf("./run_as/not_allowed/neither_sp_nor_user/%s", tc.name)
+            bundlePath := "./run_as/not_allowed/neither_sp_nor_user/" + tc.name
             b := load(t, bundlePath)

             ctx := context.Background()
@@ -2,6 +2,7 @@ package trampoline

 import (
     "context"
+    "errors"
     "fmt"
     "strconv"
     "strings"

@@ -147,7 +148,7 @@ func (t *pythonTrampoline) GetTemplateData(task *jobs.Task) (map[string]any, err

 func (t *pythonTrampoline) generateParameters(task *jobs.PythonWheelTask) (string, error) {
     if task.Parameters != nil && task.NamedParameters != nil {
-        return "", fmt.Errorf("not allowed to pass both paramaters and named_parameters")
+        return "", errors.New("not allowed to pass both paramaters and named_parameters")
     }
     params := append([]string{task.PackageName}, task.Parameters...)
     for k, v := range task.NamedParameters {
@@ -2,7 +2,7 @@ package trampoline

 import (
     "context"
-    "fmt"
+    "errors"
     "os"
     "path/filepath"
     "testing"

@@ -30,7 +30,7 @@ func (f *functions) GetTasks(b *bundle.Bundle) []TaskWithJobKey {

 func (f *functions) GetTemplateData(task *jobs.Task) (map[string]any, error) {
     if task.PythonWheelTask == nil {
-        return nil, fmt.Errorf("PythonWheelTask cannot be nil")
+        return nil, errors.New("PythonWheelTask cannot be nil")
     }

     data := make(map[string]any)
@@ -2,7 +2,7 @@ package auth

 import (
     "context"
-    "fmt"
+    "errors"

     "github.com/databricks/cli/libs/auth"
     "github.com/databricks/cli/libs/cmdio"

@@ -36,7 +36,7 @@ GCP: https://docs.gcp.databricks.com/dev-tools/auth/index.html`,

 func promptForHost(ctx context.Context) (string, error) {
     if !cmdio.IsInTTY(ctx) {
-        return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify a host using --host")
+        return "", errors.New("the command is being run in a non-interactive environment, please specify a host using --host")
     }

     prompt := cmdio.Prompt(ctx)

@@ -46,7 +46,7 @@ func promptForHost(ctx context.Context) (string, error) {

 func promptForAccountID(ctx context.Context) (string, error) {
     if !cmdio.IsInTTY(ctx) {
-        return "", fmt.Errorf("the command is being run in a non-interactive environment, please specify an account ID using --account-id")
+        return "", errors.New("the command is being run in a non-interactive environment, please specify an account ID using --account-id")
     }

     prompt := cmdio.Prompt(ctx)
@@ -2,7 +2,7 @@ package auth

 import (
     "context"
-    "fmt"
+    "errors"
     "testing"

     "github.com/databricks/cli/cmd/root"

@@ -102,7 +102,7 @@ func TestGetWorkspaceAuthStatusError(t *testing.T) {
         "token": "test-token",
         "auth_type": "azure-cli",
     })
-    return cfg, false, fmt.Errorf("auth error")
+    return cfg, false, errors.New("auth error")
 })
 require.NoError(t, err)
 require.NotNil(t, status)

@@ -151,7 +151,7 @@ func TestGetWorkspaceAuthStatusSensitive(t *testing.T) {
         "token": "test-token",
         "auth_type": "azure-cli",
     })
-    return cfg, false, fmt.Errorf("auth error")
+    return cfg, false, errors.New("auth error")
 })
 require.NoError(t, err)
 require.NotNil(t, status)
@@ -23,9 +23,9 @@ func canonicalHost(host string) (string, error) {
     }
     // If the host is empty, assume the scheme wasn't included.
     if parsedHost.Host == "" {
-        return fmt.Sprintf("https://%s", host), nil
+        return "https://" + host, nil
     }
-    return fmt.Sprintf("https://%s", parsedHost.Host), nil
+    return "https://" + parsedHost.Host, nil
 }

 var ErrNoMatchingProfiles = errors.New("no matching profiles found")

@@ -176,7 +176,7 @@ depends on the existing profiles you have set in your configuration file
 func setHostAndAccountId(ctx context.Context, profileName string, persistentAuth *auth.PersistentAuth, args []string) error {
     // If both [HOST] and --host are provided, return an error.
     if len(args) > 0 && persistentAuth.Host != "" {
-        return fmt.Errorf("please only provide a host as an argument or a flag, not both")
+        return errors.New("please only provide a host as an argument or a flag, not both")
     }

     profiler := profile.GetProfiler(ctx)
@@ -2,7 +2,7 @@ package bundle

 import (
     "context"
-    "fmt"
+    "errors"
     "os"

     "github.com/databricks/cli/bundle"

@@ -49,16 +49,16 @@ func newDestroyCommand() *cobra.Command {
     // we require auto-approve for non tty terminals since interactive consent
     // is not possible
     if !term.IsTerminal(int(os.Stderr.Fd())) && !autoApprove {
-        return fmt.Errorf("please specify --auto-approve to skip interactive confirmation checks for non tty consoles")
+        return errors.New("please specify --auto-approve to skip interactive confirmation checks for non tty consoles")
     }

     // Check auto-approve is selected for json logging
     logger, ok := cmdio.FromContext(ctx)
     if !ok {
-        return fmt.Errorf("progress logger not found")
+        return errors.New("progress logger not found")
     }
     if logger.Mode == flags.ModeJson && !autoApprove {
-        return fmt.Errorf("please specify --auto-approve since selected logging format is json")
+        return errors.New("please specify --auto-approve since selected logging format is json")
     }

     diags = bundle.Apply(ctx, b, bundle.Seq(
@@ -96,7 +96,7 @@ func (d *dashboard) resolveFromPath(ctx context.Context, b *bundle.Bundle) (stri
     return "", diag.Diagnostics{
         {
             Severity: diag.Error,
-            Summary: fmt.Sprintf("expected a dashboard, found a %s", found),
+            Summary: "expected a dashboard, found a " + found,
         },
     }
 }

@@ -188,7 +188,7 @@ func (d *dashboard) saveSerializedDashboard(_ context.Context, b *bundle.Bundle,

 func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, dashboard *dashboards.Dashboard, key string) error {
     // Save serialized dashboard definition to the dashboard directory.
-    dashboardBasename := fmt.Sprintf("%s.lvdash.json", key)
+    dashboardBasename := key + ".lvdash.json"
     dashboardPath := filepath.Join(d.dashboardDir, dashboardBasename)
     err := d.saveSerializedDashboard(ctx, b, dashboard, dashboardPath)
     if err != nil {

@@ -215,7 +215,7 @@ func (d *dashboard) saveConfiguration(ctx context.Context, b *bundle.Bundle, das
     }

     // Save the configuration to the resource directory.
-    resourcePath := filepath.Join(d.resourceDir, fmt.Sprintf("%s.dashboard.yml", key))
+    resourcePath := filepath.Join(d.resourceDir, key+".dashboard.yml")
     saver := yamlsaver.NewSaverWithStyle(map[string]yaml.Style{
         "display_name": yaml.DoubleQuotedStyle,
     })
@@ -85,8 +85,8 @@ func NewGenerateJobCommand() *cobra.Command {
     return err
 }

-oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", jobKey))
-filename := filepath.Join(configDir, fmt.Sprintf("%s.job.yml", jobKey))
+oldFilename := filepath.Join(configDir, jobKey+".yml")
+filename := filepath.Join(configDir, jobKey+".job.yml")

 // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI.
 // Due to changing in the generated file names, we need to first rename existing resource file to the new name.

@@ -107,7 +107,7 @@ func NewGenerateJobCommand() *cobra.Command {
     return err
 }

-cmdio.LogString(ctx, fmt.Sprintf("Job configuration successfully saved to %s", filename))
+cmdio.LogString(ctx, "Job configuration successfully saved to "+filename)
 return nil
 }
@@ -85,8 +85,8 @@ func NewGeneratePipelineCommand() *cobra.Command {
     return err
 }

-oldFilename := filepath.Join(configDir, fmt.Sprintf("%s.yml", pipelineKey))
-filename := filepath.Join(configDir, fmt.Sprintf("%s.pipeline.yml", pipelineKey))
+oldFilename := filepath.Join(configDir, pipelineKey+".yml")
+filename := filepath.Join(configDir, pipelineKey+".pipeline.yml")

 // User might continuously run generate command to update their bundle jobs with any changes made in Databricks UI.
 // Due to changing in the generated file names, we need to first rename existing resource file to the new name.

@@ -109,7 +109,7 @@ func NewGeneratePipelineCommand() *cobra.Command {
     return err
 }

-cmdio.LogString(ctx, fmt.Sprintf("Pipeline configuration successfully saved to %s", filename))
+cmdio.LogString(ctx, "Pipeline configuration successfully saved to "+filename)
 return nil
 }
@@ -126,7 +126,7 @@ func (n *downloader) FlushToDisk(ctx context.Context, force bool) error {
     return err
 }

-cmdio.LogString(errCtx, fmt.Sprintf("File successfully saved to %s", targetPath))
+cmdio.LogString(errCtx, "File successfully saved to "+targetPath)
 return reader.Close()
 })
 }
@@ -1,7 +1,7 @@
 package bundle

 import (
-    "fmt"
+    "errors"

     "github.com/databricks/cli/cmd/root"
     "github.com/spf13/cobra"

@@ -19,7 +19,7 @@ func newLaunchCommand() *cobra.Command {
     }

     cmd.RunE = func(cmd *cobra.Command, args []string) error {
-        return fmt.Errorf("TODO")
+        return errors.New("TODO")
         // contents, err := os.ReadFile(args[0])
         // if err != nil {
         //     return err
@@ -44,7 +44,7 @@ func resolveOpenArgument(ctx context.Context, b *bundle.Bundle, args []string) (
     }

     if len(args) < 1 {
-        return "", fmt.Errorf("expected a KEY of the resource to open")
+        return "", errors.New("expected a KEY of the resource to open")
     }

     return args[0], nil

@@ -113,7 +113,7 @@ func newOpenCommand() *cobra.Command {
     // Confirm that the resource has a URL.
     url := ref.Resource.GetURL()
     if url == "" {
-        return fmt.Errorf("resource does not have a URL associated with it (has it been deployed?)")
+        return errors.New("resource does not have a URL associated with it (has it been deployed?)")
     }

     return browser.OpenURL(url)
@@ -3,6 +3,7 @@ package bundle
 import (
     "context"
     "encoding/json"
+    "errors"
     "fmt"

     "github.com/databricks/cli/bundle"

@@ -48,7 +49,7 @@ func resolveRunArgument(ctx context.Context, b *bundle.Bundle, args []string) (s
     }

     if len(args) < 1 {
-        return "", nil, fmt.Errorf("expected a KEY of the resource to run")
+        return "", nil, errors.New("expected a KEY of the resource to run")
     }

     return args[0], args[1:], nil
@@ -1,7 +1,7 @@
 package bundle

 import (
-    "fmt"
+    "errors"

     "github.com/spf13/cobra"
 )

@@ -17,7 +17,7 @@ func newTestCommand() *cobra.Command {
     }

     cmd.RunE = func(cmd *cobra.Command, args []string) error {
-        return fmt.Errorf("TODO")
+        return errors.New("TODO")
         // results := project.RunPythonOnDev(cmd.Context(), `return 1`)
         // if results.Failed() {
         //     return results.Err()
@@ -2,6 +2,7 @@ package bundle

 import (
     "encoding/json"
+    "errors"
     "fmt"

     "github.com/databricks/cli/bundle"

@@ -39,7 +40,7 @@ func newValidateCommand() *cobra.Command {
     if err := diags.Error(); err != nil {
         return diags.Error()
     } else {
-        return fmt.Errorf("invariant failed: returned bundle is nil")
+        return errors.New("invariant failed: returned bundle is nil")
     }
 }
@@ -1,6 +1,7 @@
 package configure

 import (
+    "errors"
     "fmt"

     "github.com/databricks/cli/libs/cmdio"

@@ -62,12 +63,12 @@ func configureInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config

 func configureNonInteractive(cmd *cobra.Command, flags *configureFlags, cfg *config.Config) error {
     if cfg.Host == "" {
-        return fmt.Errorf("host must be set in non-interactive mode")
+        return errors.New("host must be set in non-interactive mode")
     }

     // Check presence of cluster ID before reading token to fail fast.
     if flags.ConfigureCluster && cfg.ClusterID == "" {
-        return fmt.Errorf("cluster ID must be set in non-interactive mode")
+        return errors.New("cluster ID must be set in non-interactive mode")
     }

     // Read token from stdin if not already set.
@@ -1,7 +1,7 @@
 package configure

 import (
-    "fmt"
+    "errors"
     "net/url"
 )

@@ -11,10 +11,10 @@ func validateHost(s string) error {
     return err
 }
 if u.Host == "" || u.Scheme != "https" {
-    return fmt.Errorf("must start with https://")
+    return errors.New("must start with https://")
 }
 if u.Path != "" && u.Path != "/" {
-    return fmt.Errorf("must use empty path")
+    return errors.New("must use empty path")
 }
 return nil
 }
@@ -12,7 +12,7 @@ import (
 const repositoryCacheTTL = 24 * time.Hour

 func NewRepositoryCache(org, cacheDir string) *repositoryCache {
-    filename := fmt.Sprintf("%s-repositories", org)
+    filename := org + "-repositories"
     return &repositoryCache{
         cache: localcache.NewLocalCache[Repositories](cacheDir, filename, repositoryCacheTTL),
         Org: org,
@@ -1,6 +1,7 @@
 package labs

 import (
+    "errors"
     "fmt"

     "github.com/databricks/cli/cmd/labs/project"

@@ -49,7 +50,7 @@ func newInstalledCommand() *cobra.Command {
     })
 }
 if len(info.Projects) == 0 {
-    return fmt.Errorf("no projects installed")
+    return errors.New("no projects installed")
 }
 return cmdio.Render(ctx, info)
 },
@@ -93,7 +93,7 @@ func (r *LocalCache[T]) writeCache(ctx context.Context, data T) (T, error) {
 }

 func (r *LocalCache[T]) FileName() string {
-    return filepath.Join(r.dir, fmt.Sprintf("%s.json", r.name))
+    return filepath.Join(r.dir, r.name+".json")
 }

 func (r *LocalCache[T]) loadCache() (*cached[T], error) {
@@ -3,7 +3,6 @@ package localcache
 import (
     "context"
     "errors"
-    "fmt"
     "net/url"
     "runtime"
     "testing"

@@ -115,7 +114,7 @@ func TestFolderDisappears(t *testing.T) {
 func TestRefreshFails(t *testing.T) {
     c := NewLocalCache[int64](t.TempDir(), "time", 1*time.Minute)
     tick := func() (int64, error) {
-        return 0, fmt.Errorf("nope")
+        return 0, errors.New("nope")
     }
     ctx := context.Background()
     _, err := c.Load(ctx, tick)
@@ -175,7 +175,7 @@ func (i *installer) login(ctx context.Context) (*databricks.WorkspaceClient, err
 		return nil, fmt.Errorf("valid: %w", err)
 	}
 	if !i.HasAccountLevelCommands() && cfg.IsAccountClient() {
-		return nil, fmt.Errorf("got account-level client, but no account-level commands")
+		return nil, errors.New("got account-level client, but no account-level commands")
 	}
 	lc := &loginConfig{Entrypoint: i.Installer.Entrypoint}
 	w, err := lc.askWorkspace(ctx, cfg)
@@ -200,10 +200,10 @@ func (i *installer) downloadLibrary(ctx context.Context) error {
 	libTarget := i.LibDir()
 	// we may support wheels, jars, and golang binaries. but those are not zipballs
 	if i.IsZipball() {
-		feedback <- fmt.Sprintf("Downloading and unpacking zipball for %s", i.version)
+		feedback <- "Downloading and unpacking zipball for " + i.version
 		return i.downloadAndUnpackZipball(ctx, libTarget)
 	}
-	return fmt.Errorf("we only support zipballs for now")
+	return errors.New("we only support zipballs for now")
 }

 func (i *installer) downloadAndUnpackZipball(ctx context.Context, libTarget string) error {
@@ -234,7 +234,7 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
 	log.Debugf(ctx, "Detected Python %s at: %s", py.Version, py.Path)
 	venvPath := i.virtualEnvPath(ctx)
 	log.Debugf(ctx, "Creating Python Virtual Environment at: %s", venvPath)
-	feedback <- fmt.Sprintf("Creating Virtual Environment with Python %s", py.Version)
+	feedback <- "Creating Virtual Environment with Python " + py.Version
 	_, err = process.Background(ctx, []string{py.Path, "-m", "venv", venvPath})
 	if err != nil {
 		return fmt.Errorf("create venv: %w", err)
@@ -251,8 +251,8 @@ func (i *installer) setupPythonVirtualEnvironment(ctx context.Context, w *databr
 	if !ok {
 		return fmt.Errorf("unsupported runtime: %s", cluster.SparkVersion)
 	}
-	feedback <- fmt.Sprintf("Installing Databricks Connect v%s", runtimeVersion)
-	pipSpec := fmt.Sprintf("databricks-connect==%s", runtimeVersion)
+	feedback <- "Installing Databricks Connect v" + runtimeVersion
+	pipSpec := "databricks-connect==" + runtimeVersion
 	err = i.installPythonDependencies(ctx, pipSpec)
 	if err != nil {
 		return fmt.Errorf("dbconnect: %w", err)

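The performance claim behind these rewrites is easy to check with a pair of benchmarks. The sketch below is illustrative and not part of the commit; run it with go test -bench=. and expect the concatenation variant to skip the interface boxing and format parsing that fmt.Sprintf performs.

package perf_test

import (
	"fmt"
	"testing"
)

// Both benchmarks build the same feedback string; only the mechanism differs.
func BenchmarkSprintf(b *testing.B) {
	version := "1.2.3" // illustrative value
	for i := 0; i < b.N; i++ {
		_ = fmt.Sprintf("Installing Databricks Connect v%s", version)
	}
}

func BenchmarkConcat(b *testing.B) {
	version := "1.2.3"
	for i := 0; i < b.N; i++ {
		_ = "Installing Databricks Connect v" + version
	}
}
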
@@ -1,7 +1,7 @@
 package labs

 import (
-	"fmt"
+	"errors"

 	"github.com/databricks/cli/cmd/labs/project"
 	"github.com/databricks/cli/cmd/root"
@@ -34,7 +34,7 @@ func newShowCommand() *cobra.Command {
 			return err
 		}
 		if len(installed) == 0 {
-			return fmt.Errorf("no projects found")
+			return errors.New("no projects found")
 		}
 		name := args[0]
 		for _, v := range installed {

@@ -26,7 +26,7 @@ type ErrNoWorkspaceProfiles struct {
 }

 func (e ErrNoWorkspaceProfiles) Error() string {
-	return fmt.Sprintf("%s does not contain workspace profiles; please create one by running 'databricks configure'", e.path)
+	return e.path + " does not contain workspace profiles; please create one by running 'databricks configure'"
 }

 type ErrNoAccountProfiles struct {
@@ -34,7 +34,7 @@ type ErrNoAccountProfiles struct {
 }

 func (e ErrNoAccountProfiles) Error() string {
-	return fmt.Sprintf("%s does not contain account profiles", e.path)
+	return e.path + " does not contain account profiles"
 }

 func initProfileFlag(cmd *cobra.Command) {

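Error() methods are a frequent perfsprint target because they can sit on hot paths (logging, retries). A compilable sketch mirroring the shape of the two error types above; the type name and field are illustrative:

package main

import "fmt"

// errNoProfiles is a hypothetical error type mirroring the ones in the hunk.
type errNoProfiles struct {
	path string
}

// With a single leading %s, concatenation keeps fmt out of error formatting.
func (e errNoProfiles) Error() string {
	return e.path + " does not contain account profiles"
}

func main() {
	var err error = errNoProfiles{path: "~/.databrickscfg"}
	fmt.Println(err)
}
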
@@ -253,7 +253,7 @@ func AskForWorkspaceProfile(ctx context.Context) (string, error) {
 		return profiles[0].Name, nil
 	}
 	i, _, err := cmdio.RunSelect(ctx, &promptui.Select{
-		Label:             fmt.Sprintf("Workspace profiles defined in %s", path),
+		Label:             "Workspace profiles defined in " + path,
 		Items:             profiles,
 		Searcher:          profiles.SearchCaseInsensitive,
 		StartInSearchMode: true,
@@ -287,7 +287,7 @@ func AskForAccountProfile(ctx context.Context) (string, error) {
 		return profiles[0].Name, nil
 	}
 	i, _, err := cmdio.RunSelect(ctx, &promptui.Select{
-		Label:             fmt.Sprintf("Account profiles defined in %s", path),
+		Label:             "Account profiles defined in " + path,
 		Items:             profiles,
 		Searcher:          profiles.SearchCaseInsensitive,
 		StartInSearchMode: true,

@@ -2,7 +2,7 @@ package root

 import (
 	"context"
-	"fmt"
+	"errors"
 	"os"

 	"github.com/databricks/cli/libs/cmdio"
@@ -37,7 +37,7 @@ func (f *progressLoggerFlag) initializeContext(ctx context.Context) (context.Con

 	if f.log.level.String() != "disabled" && f.log.file.String() == "stderr" &&
 		f.ProgressLogFormat == flags.ModeInplace {
-		return nil, fmt.Errorf("inplace progress logging cannot be used when log-file is stderr")
+		return nil, errors.New("inplace progress logging cannot be used when log-file is stderr")
 	}

 	format := f.ProgressLogFormat

@@ -2,7 +2,6 @@ package sync

 import (
 	"context"
-	"fmt"
 	"path"
 	"strings"

@@ -52,8 +51,8 @@ func completeRemotePath(
 	}

 	prefixes := []string{
-		path.Clean(fmt.Sprintf("/Users/%s", me.UserName)) + "/",
-		path.Clean(fmt.Sprintf("/Repos/%s", me.UserName)) + "/",
+		path.Clean("/Users/"+me.UserName) + "/",
+		path.Clean("/Repos/"+me.UserName) + "/",
 	}

 	validPrefix := false

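One subtlety in this hunk: the concatenation moves inside the path.Clean call, so the + within the parentheses is evaluated as the argument to path.Clean, and the trailing "/" is appended to the cleaned result, exactly as before. A quick sketch with an illustrative user name:

package main

import (
	"fmt"
	"path"
)

func main() {
	userName := "someone@example.com" // illustrative

	// path.Clean sees the full "/Users/<name>" string; the trailing "/"
	// is appended afterwards, matching the rewritten prefixes above.
	prefix := path.Clean("/Users/"+userName) + "/"
	fmt.Println(prefix) // /Users/someone@example.com/
}
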
@@ -2,6 +2,7 @@ package sync

 import (
 	"context"
+	"errors"
 	"flag"
 	"fmt"
 	"io"
@@ -29,7 +30,7 @@ type syncFlags struct {

 func (f *syncFlags) syncOptionsFromBundle(cmd *cobra.Command, args []string, b *bundle.Bundle) (*sync.SyncOptions, error) {
 	if len(args) > 0 {
-		return nil, fmt.Errorf("SRC and DST are not configurable in the context of a bundle")
+		return nil, errors.New("SRC and DST are not configurable in the context of a bundle")
 	}

 	opts, err := files.GetSyncOptions(cmd.Context(), bundle.ReadOnly(b))

@@ -2,6 +2,7 @@ package repos

 import (
 	"context"
+	"errors"
 	"fmt"
 	"strconv"

@@ -153,7 +154,7 @@ func repoArgumentToRepoID(ctx context.Context, w *databricks.WorkspaceClient, ar
 		args = append(args, id)
 	}
 	if len(args) != 1 {
-		return 0, fmt.Errorf("expected to have the id for the corresponding repo to access")
+		return 0, errors.New("expected to have the id for the corresponding repo to access")
 	}
 	// ---- End copy from cmd/workspace/repos/repos.go ----

@@ -2,7 +2,7 @@ package secrets

 import (
 	"encoding/base64"
-	"fmt"
+	"errors"
 	"io"
 	"os"

@@ -67,7 +67,7 @@ func newPutSecret() *cobra.Command {
 			bytesValueChanged := cmd.Flags().Changed("bytes-value")
 			stringValueChanged := cmd.Flags().Changed("string-value")
 			if bytesValueChanged && stringValueChanged {
-				return fmt.Errorf("cannot specify both --bytes-value and --string-value")
+				return errors.New("cannot specify both --bytes-value and --string-value")
 			}

 			if cmd.Flags().Changed("json") {

@@ -36,7 +36,7 @@ func exportOverride(exportCmd *cobra.Command, exportReq *workspace.ExportRequest
 		ctx := cmd.Context()
 		w := root.WorkspaceClient(ctx)
 		if len(args) != 1 {
-			return fmt.Errorf("expected to have the absolute path of the object or directory")
+			return errors.New("expected to have the absolute path of the object or directory")
 		}
 		exportReq.Path = args[0]

@@ -1,9 +1,9 @@
 package bundle_test

 import (
-	"fmt"
 	"os"
 	"path/filepath"
+	"strconv"
 	"testing"

 	"github.com/databricks/cli/integration/internal/acc"
@@ -35,7 +35,7 @@ func TestBindJobToExistingJob(t *testing.T) {
 	})

 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
-	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId), "--auto-approve")
+	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10), "--auto-approve")
 	_, _, err := c.Run()
 	require.NoError(t, err)

@@ -53,7 +53,7 @@ func TestBindJobToExistingJob(t *testing.T) {
 		JobId: jobId,
 	})
 	require.NoError(t, err)
-	require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId))
+	require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId)
 	require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py")

 	c = testcli.NewRunner(t, ctx, "bundle", "deployment", "unbind", "foo")
@@ -71,7 +71,7 @@ func TestBindJobToExistingJob(t *testing.T) {
 		JobId: jobId,
 	})
 	require.NoError(t, err)
-	require.Equal(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId))
+	require.Equal(t, job.Settings.Name, "test-job-basic-"+uniqueId)
 	require.Contains(t, job.Settings.Tasks[0].SparkPythonTask.PythonFile, "hello_world.py")
 }

@@ -96,7 +96,7 @@ func TestAbortBind(t *testing.T) {
 	// Bind should fail because prompting is not possible.
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	ctx = env.Set(ctx, "TERM", "dumb")
-	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", fmt.Sprint(jobId))
+	c := testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "foo", strconv.FormatInt(jobId, 10))

 	// Expect error suggesting to use --auto-approve
 	_, _, err := c.Run()
@@ -114,7 +114,7 @@ func TestAbortBind(t *testing.T) {
 	})
 	require.NoError(t, err)

-	require.NotEqual(t, job.Settings.Name, fmt.Sprintf("test-job-basic-%s", uniqueId))
+	require.NotEqual(t, job.Settings.Name, "test-job-basic-"+uniqueId)
 	require.Contains(t, job.Settings.Tasks[0].NotebookTask.NotebookPath, "test")
 }

@@ -143,7 +143,7 @@ func TestGenerateAndBind(t *testing.T) {
 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	c := testcli.NewRunner(t, ctx, "bundle", "generate", "job",
 		"--key", "test_job_key",
-		"--existing-job-id", fmt.Sprint(jobId),
+		"--existing-job-id", strconv.FormatInt(jobId, 10),
 		"--config-dir", filepath.Join(bundleRoot, "resources"),
 		"--source-dir", filepath.Join(bundleRoot, "src"))
 	_, _, err = c.Run()
@@ -157,7 +157,7 @@ func TestGenerateAndBind(t *testing.T) {

 	require.Len(t, matches, 1)

-	c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", fmt.Sprint(jobId), "--auto-approve")
+	c = testcli.NewRunner(t, ctx, "bundle", "deployment", "bind", "test_job_key", strconv.FormatInt(jobId, 10), "--auto-approve")
 	_, _, err = c.Run()
 	require.NoError(t, err)

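For non-string operands, perfsprint swaps fmt.Sprint for the matching strconv call; for the int64 job ID here that is strconv.FormatInt(id, 10), which converts the value directly instead of boxing it into an interface and reflecting on it. A minimal sketch with an illustrative ID:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	var jobId int64 = 123456789 // illustrative

	// fmt.Sprint reflects on its argument; strconv.FormatInt converts
	// directly in the requested base.
	s1 := fmt.Sprint(jobId)
	s2 := strconv.FormatInt(jobId, 10)

	fmt.Println(s1 == s2) // true
}
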
@@ -1,7 +1,6 @@
 package bundle_test

 import (
-	"fmt"
 	"testing"

 	"github.com/databricks/cli/integration/internal/acc"
@@ -29,7 +28,7 @@ func TestDeployBundleWithCluster(t *testing.T) {
 	t.Cleanup(func() {
 		destroyBundle(t, ctx, root)

-		cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
+		cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId)
 		if err != nil {
 			require.ErrorContains(t, err, "does not exist")
 		} else {
@@ -40,7 +39,7 @@ func TestDeployBundleWithCluster(t *testing.T) {
 	deployBundle(t, ctx, root)

 	// Cluster should exists after bundle deployment
-	cluster, err := wt.W.Clusters.GetByClusterName(ctx, fmt.Sprintf("test-cluster-%s", uniqueId))
+	cluster, err := wt.W.Clusters.GetByClusterName(ctx, "test-cluster-"+uniqueId)
 	require.NoError(t, err)
 	require.NotNil(t, cluster)

@@ -40,7 +40,7 @@ func TestDashboards(t *testing.T) {
 	// Load the dashboard by its ID and confirm its display name.
 	dashboard, err := wt.W.Lakeview.GetByDashboardId(ctx, oi.ResourceId)
 	require.NoError(t, err)
-	assert.Equal(t, fmt.Sprintf("test-dashboard-%s", uniqueID), dashboard.DisplayName)
+	assert.Equal(t, "test-dashboard-"+uniqueID, dashboard.DisplayName)

 	// Make an out of band modification to the dashboard and confirm that it is detected.
 	_, err = wt.W.Lakeview.Update(ctx, dashboards.UpdateDashboardRequest{

@@ -1,7 +1,6 @@
 package bundle_test

 import (
-	"fmt"
 	"testing"

 	"github.com/databricks/cli/integration/internal/acc"
@@ -23,7 +22,7 @@ func TestDeployBasicToSharedWorkspacePath(t *testing.T) {
 		"unique_id":     uniqueId,
 		"node_type_id":  nodeTypeId,
 		"spark_version": defaultSparkVersion,
-		"root_path":     fmt.Sprintf("/Shared/%s", currentUser.UserName),
+		"root_path":     "/Shared/" + currentUser.UserName,
 	})

 	t.Cleanup(func() {

@@ -1,7 +1,6 @@
 package bundle_test

 import (
-	"fmt"
 	"os"
 	"path/filepath"
 	"testing"
@@ -19,8 +18,7 @@ func TestEmptyBundleDeploy(t *testing.T) {
 	f, err := os.Create(filepath.Join(tmpDir, "databricks.yml"))
 	require.NoError(t, err)

-	bundleRoot := fmt.Sprintf(`bundle:
-  name: %s`, uuid.New().String())
+	bundleRoot := "bundle:\n  name: " + uuid.New().String()
 	_, err = f.WriteString(bundleRoot)
 	require.NoError(t, err)
 	f.Close()

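This hunk also collapses a two-line raw-string template into a single quoted literal with an explicit \n. A sketch of the equivalence, with a fixed name standing in for uuid.New().String():

package main

import "fmt"

func main() {
	name := "my-bundle" // stands in for uuid.New().String()

	// The raw string contains a real newline and two-space YAML indent.
	before := fmt.Sprintf(`bundle:
  name: %s`, name)
	after := "bundle:\n  name: " + name

	fmt.Println(before == after) // true
}
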
@@ -2,10 +2,10 @@ package bundle_test

 import (
 	"context"
-	"fmt"
 	"os"
 	"path"
 	"path/filepath"
+	"strconv"
 	"strings"
 	"testing"

@@ -37,7 +37,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {

 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	c := testcli.NewRunner(t, ctx, "bundle", "generate", "job",
-		"--existing-job-id", fmt.Sprint(jobId),
+		"--existing-job-id", strconv.FormatInt(jobId, 10),
 		"--config-dir", filepath.Join(bundleRoot, "resources"),
 		"--source-dir", filepath.Join(bundleRoot, "src"))
 	_, _, err := c.Run()
@@ -55,7 +55,7 @@ func TestGenerateFromExistingJobAndDeploy(t *testing.T) {
 	require.NoError(t, err)
 	generatedYaml := string(data)
 	require.Contains(t, generatedYaml, "notebook_task:")
-	require.Contains(t, generatedYaml, fmt.Sprintf("notebook_path: %s", filepath.Join("..", "src", "test.py")))
+	require.Contains(t, generatedYaml, "notebook_path: "+filepath.Join("..", "src", "test.py"))
 	require.Contains(t, generatedYaml, "task_key: test")
 	require.Contains(t, generatedYaml, "new_cluster:")
 	require.Contains(t, generatedYaml, "spark_version: 13.3.x-scala2.12")

@@ -2,7 +2,6 @@ package bundle_test

 import (
 	"context"
-	"fmt"
 	"os"
 	"path"
 	"path/filepath"
@@ -36,7 +35,7 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {

 	ctx = env.Set(ctx, "BUNDLE_ROOT", bundleRoot)
 	c := testcli.NewRunner(t, ctx, "bundle", "generate", "pipeline",
-		"--existing-pipeline-id", fmt.Sprint(pipelineId),
+		"--existing-pipeline-id", pipelineId,
 		"--config-dir", filepath.Join(bundleRoot, "resources"),
 		"--source-dir", filepath.Join(bundleRoot, "src"))
 	_, _, err := c.Run()
@@ -65,9 +64,9 @@ func TestGenerateFromExistingPipelineAndDeploy(t *testing.T) {

 	require.Contains(t, generatedYaml, "libraries:")
 	require.Contains(t, generatedYaml, "- notebook:")
-	require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "notebook.py")))
+	require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "notebook.py"))
 	require.Contains(t, generatedYaml, "- file:")
-	require.Contains(t, generatedYaml, fmt.Sprintf("path: %s", filepath.Join("..", "src", "test.py")))
+	require.Contains(t, generatedYaml, "path: "+filepath.Join("..", "src", "test.py"))

 	deployBundle(t, ctx, bundleRoot)

@@ -66,7 +66,7 @@ func TestBundleInitOnMlopsStacks(t *testing.T) {

 	// Assert that the README.md file was created
 	contents := testutil.ReadFile(t, filepath.Join(tmpDir2, "repo_name", projectName, "README.md"))
-	assert.Contains(t, contents, fmt.Sprintf("# %s", projectName))
+	assert.Contains(t, contents, "# "+projectName)

 	// Validate the stack
 	testutil.Chdir(t, filepath.Join(tmpDir2, "repo_name", projectName))

@@ -2,7 +2,6 @@ package auth_test

 import (
 	"context"
-	"fmt"
 	"testing"

 	"github.com/databricks/cli/internal/testcli"
@@ -21,14 +20,14 @@ func TestAuthDescribeSuccess(t *testing.T) {
 	require.NoError(t, err)

 	require.NotEmpty(t, outStr)
-	require.Contains(t, outStr, fmt.Sprintf("Host: %s", w.Config.Host))
+	require.Contains(t, outStr, "Host: "+w.Config.Host)

 	me, err := w.CurrentUser.Me(context.Background())
 	require.NoError(t, err)
-	require.Contains(t, outStr, fmt.Sprintf("User: %s", me.UserName))
-	require.Contains(t, outStr, fmt.Sprintf("Authenticated with: %s", w.Config.AuthType))
+	require.Contains(t, outStr, "User: "+me.UserName)
+	require.Contains(t, outStr, "Authenticated with: "+w.Config.AuthType)
 	require.Contains(t, outStr, "Current configuration:")
-	require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host))
+	require.Contains(t, outStr, "✓ host: "+w.Config.Host)
 	require.Contains(t, outStr, "✓ profile: default")
 }
@@ -47,6 +46,6 @@ func TestAuthDescribeFailure(t *testing.T) {
 	w, err := databricks.NewWorkspaceClient(&databricks.Config{})
 	require.NoError(t, err)

-	require.Contains(t, outStr, fmt.Sprintf("✓ host: %s", w.Config.Host))
+	require.Contains(t, outStr, "✓ host: "+w.Config.Host)
 	require.Contains(t, outStr, "✓ profile: nonexistent (from --profile flag)")
 }

@@ -2,7 +2,6 @@ package fs_test

 import (
 	"context"
-	"fmt"
 	"strings"
 	"testing"

@@ -24,6 +23,6 @@ func TestFsCompletion(t *testing.T) {
 	setupCompletionFile(t, f)

 	stdout, _ := testcli.RequireSuccessfulRun(t, ctx, "__complete", "fs", "ls", tmpDir+"/")
-	expectedOutput := fmt.Sprintf("%s/dir1/\n:2\n", tmpDir)
+	expectedOutput := tmpDir + "/dir1/\n:2\n"
 	assert.Equal(t, expectedOutput, stdout.String())
 }

@@ -3,7 +3,7 @@ package jobs_test
 import (
 	"context"
 	"encoding/json"
-	"fmt"
+	"strconv"
 	"testing"

 	"github.com/databricks/cli/internal/testcli"
@@ -20,5 +20,5 @@ func TestCreateJob(t *testing.T) {
 	var output map[string]int
 	err := json.Unmarshal(stdout.Bytes(), &output)
 	require.NoError(t, err)
-	testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", fmt.Sprint(output["job_id"]), "--log-level=debug")
+	testcli.RequireSuccessfulRun(t, ctx, "jobs", "delete", strconv.Itoa(output["job_id"]), "--log-level=debug")
 }

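For a plain int, the replacement is strconv.Itoa, shorthand for strconv.FormatInt(int64(n), 10). A minimal sketch with an illustrative job ID:

package main

import (
	"fmt"
	"strconv"
)

func main() {
	output := map[string]int{"job_id": 42} // illustrative

	// strconv.Itoa converts the int directly, with no format string.
	id := strconv.Itoa(output["job_id"])
	fmt.Println(id) // 42
}
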
@@ -151,10 +151,7 @@ func (a *syncTest) remoteFileContent(ctx context.Context, relativePath, expected
 	filePath := path.Join(a.remoteRoot, relativePath)

 	// Remove leading "/" so we can use it in the URL.
-	urlPath := fmt.Sprintf(
-		"/api/2.0/workspace-files/%s",
-		strings.TrimLeft(filePath, "/"),
-	)
+	urlPath := "/api/2.0/workspace-files/" + strings.TrimLeft(filePath, "/")

 	apiClient, err := client.New(a.w.Config)
 	require.NoError(a.t, err)

@@ -114,7 +114,7 @@ func TestExportDir(t *testing.T) {
 	require.NoError(t, err)

 	expectedLogs := strings.Join([]string{
-		fmt.Sprintf("Exporting files from %s", sourceDir),
+		"Exporting files from " + sourceDir,
 		fmt.Sprintf("%s -> %s", path.Join(sourceDir, "a/b/c/file-b"), filepath.Join(targetDir, "a/b/c/file-b")),
 		fmt.Sprintf("%s -> %s", path.Join(sourceDir, "file-a"), filepath.Join(targetDir, "file-a")),
 		fmt.Sprintf("%s -> %s", path.Join(sourceDir, "pyNotebook"), filepath.Join(targetDir, "pyNotebook.py")),
@@ -185,7 +185,7 @@ func TestImportDir(t *testing.T) {
 	stdout, stderr := testcli.RequireSuccessfulRun(t, ctx, "workspace", "import-dir", "./testdata/import_dir", targetDir, "--log-level=debug")

 	expectedLogs := strings.Join([]string{
-		fmt.Sprintf("Importing files from %s", "./testdata/import_dir"),
+		"Importing files from " + "./testdata/import_dir",
 		fmt.Sprintf("%s -> %s", filepath.FromSlash("a/b/c/file-b"), path.Join(targetDir, "a/b/c/file-b")),
 		fmt.Sprintf("%s -> %s", filepath.FromSlash("file-a"), path.Join(targetDir, "file-a")),
 		fmt.Sprintf("%s -> %s", filepath.FromSlash("jupyterNotebook.ipynb"), path.Join(targetDir, "jupyterNotebook")),

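Note what the autofix leaves alone: the two-verb fmt.Sprintf("%s -> %s", ...) lines survive, since perfsprint only rewrites the simple cases where concatenation or a strconv call is a clear win. It does produce the slightly odd literal-plus-literal form "Importing files from " + "./testdata/import_dir", which the compiler folds into a single constant. A sketch of both behaviors:

package main

import "fmt"

func main() {
	src, dst := "a/b/c/file-b", "target/a/b/c/file-b" // illustrative paths

	// Two verbs, two operands: left as fmt.Sprintf by the linter.
	line := fmt.Sprintf("%s -> %s", src, dst)
	fmt.Println(line)

	// Literal + literal: folded into one constant at compile time.
	fmt.Println("Importing files from " + "./testdata/import_dir")
}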